Diffstat (limited to 'src/arch/wasm/CodeGen.zig')
 src/arch/wasm/CodeGen.zig | 2569 +-
 1 file changed, 1515 insertions(+), 1054 deletions(-)
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index 11969d567a..495ca7f6dd 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -11,6 +11,7 @@ const log = std.log.scoped(.codegen);
const codegen = @import("../../codegen.zig");
const Module = @import("../../Module.zig");
+const InternPool = @import("../../InternPool.zig");
const Decl = Module.Decl;
const Type = @import("../../type.zig").Type;
const Value = @import("../../value.zig").Value;
@@ -29,6 +30,9 @@ const errUnionErrorOffset = codegen.errUnionErrorOffset;
/// Wasm Value, created when generating an instruction
const WValue = union(enum) {
+ /// `WValue` which has been freed and may no longer hold
+ /// any references.
+ dead: void,
/// May be referenced but is unused
none: void,
/// The value lives on top of the stack
@@ -86,6 +90,7 @@ const WValue = union(enum) {
fn offset(value: WValue) u32 {
switch (value) {
.stack_offset => |stack_offset| return stack_offset.value,
+ .dead => unreachable,
else => return 0,
}
}
@@ -123,7 +128,8 @@ const WValue = union(enum) {
.f64 => gen.free_locals_f64.append(gen.gpa, local_value) catch return,
.v128 => gen.free_locals_v128.append(gen.gpa, local_value) catch return,
}
- value.* = undefined;
+ log.debug("freed local ({d}) of type {}", .{ local_value, valtype });
+ value.* = .dead;
}
};
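
The new `.dead` tag turns use-after-free of a local into a loud assertion failure instead of silent reuse of a stale index. A minimal sketch of the pattern (names are illustrative, not from this commit):

const std = @import("std");

const Slot = union(enum) {
    dead: void,
    local: u32,

    fn use(slot: Slot) u32 {
        return switch (slot) {
            .dead => unreachable, // use-after-free is caught right here
            .local => |index| index,
        };
    }

    fn free(slot: *Slot) void {
        slot.* = .dead; // poison instead of leaving the memory undefined
    }
};

test "freed slots are poisoned" {
    var slot: Slot = .{ .local = 3 };
    try std.testing.expectEqual(@as(u32, 3), slot.use());
    slot.free();
    // slot.use() would now hit `unreachable` in safe build modes.
}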
@@ -759,8 +765,9 @@ pub fn deinit(func: *CodeGen) void {
/// Sets `err_msg` on `CodeGen` and returns `error.CodegenFail` which is caught in link/Wasm.zig
fn fail(func: *CodeGen, comptime fmt: []const u8, args: anytype) InnerError {
+ const mod = func.bin_file.base.options.module.?;
const src = LazySrcLoc.nodeOffset(0);
- const src_loc = src.toSrcLoc(func.decl);
+ const src_loc = src.toSrcLoc(func.decl, mod);
func.err_msg = try Module.ErrorMsg.create(func.gpa, src_loc, fmt, args);
return error.CodegenFail;
}
@@ -783,9 +790,10 @@ fn resolveInst(func: *CodeGen, ref: Air.Inst.Ref) InnerError!WValue {
const gop = try func.branches.items[0].values.getOrPut(func.gpa, ref);
assert(!gop.found_existing);
- const val = func.air.value(ref).?;
- const ty = func.air.typeOf(ref);
- if (!ty.hasRuntimeBitsIgnoreComptime() and !ty.isInt() and !ty.isError()) {
+ const mod = func.bin_file.base.options.module.?;
+ const val = (try func.air.value(ref, mod)).?;
+ const ty = func.typeOf(ref);
+ if (!ty.hasRuntimeBitsIgnoreComptime(mod) and !ty.isInt(mod) and !ty.isError(mod)) {
gop.value_ptr.* = WValue{ .none = {} };
return gop.value_ptr.*;
}
@@ -796,7 +804,7 @@ fn resolveInst(func: *CodeGen, ref: Air.Inst.Ref) InnerError!WValue {
//
// In the other cases, we will simply lower the constant to a value that fits
// into a single local (such as a pointer, integer, bool, etc).
- const result = if (isByRef(ty, func.target)) blk: {
+ const result = if (isByRef(ty, mod)) blk: {
const sym_index = try func.bin_file.lowerUnnamedConst(.{ .ty = ty, .val = val }, func.decl_index);
break :blk WValue{ .memory = sym_index };
} else try func.lowerConstant(val, ty);
@@ -832,6 +840,7 @@ const Branch = struct {
fn deinit(branch: *Branch, gpa: Allocator) void {
branch.values.deinit(gpa);
+ branch.* = undefined;
}
};
@@ -874,13 +883,17 @@ fn iterateBigTomb(func: *CodeGen, inst: Air.Inst.Index, operand_count: usize) !B
fn processDeath(func: *CodeGen, ref: Air.Inst.Ref) void {
const inst = Air.refToIndex(ref) orelse return;
- if (func.air.instructions.items(.tag)[inst] == .constant) return;
+ assert(func.air.instructions.items(.tag)[inst] != .interned);
// Branches are currently only allowed to free locals allocated
// within their own branch.
// TODO: Upon branch consolidation free any locals if needed.
const value = func.currentBranch().values.getPtr(ref) orelse return;
if (value.* != .local) return;
- log.debug("Decreasing reference for ref: %{?d}\n", .{Air.refToIndex(ref)});
+ const reserved_indexes = func.args.len + @boolToInt(func.return_value != .none);
+ if (value.local.value < reserved_indexes) {
+ return; // function arguments can never be re-used
+ }
+ log.debug("Decreasing reference for ref: %{?d}, using local '{d}'", .{ Air.refToIndex(ref), value.local.value });
value.local.references -= 1; // if this panics, a call to `reuseOperand` was forgotten by the developer
if (value.local.references == 0) {
value.free(func);
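
The guard above relies on WebAssembly's local index layout: locals 0 through param_count-1 are the function's parameters (plus an optional sret slot), so they must never land on a free list. A sketch of that check with assumed names:

fn isReservedLocal(local_index: u32, param_count: u32, has_sret: bool) bool {
    const reserved = param_count + @boolToInt(has_sret);
    return local_index < reserved; // parameters are never recycled
}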
@@ -977,8 +990,9 @@ fn addExtraAssumeCapacity(func: *CodeGen, extra: anytype) error{OutOfMemory}!u32
}
/// Using a given `Type`, returns the corresponding wasm value type
-fn typeToValtype(ty: Type, target: std.Target) wasm.Valtype {
- return switch (ty.zigTypeTag()) {
+fn typeToValtype(ty: Type, mod: *Module) wasm.Valtype {
+ const target = mod.getTarget();
+ return switch (ty.zigTypeTag(mod)) {
.Float => blk: {
const bits = ty.floatBits(target);
if (bits == 16) return wasm.Valtype.i32; // stored/loaded as u16
@@ -988,44 +1002,52 @@ fn typeToValtype(ty: Type, target: std.Target) wasm.Valtype {
return wasm.Valtype.i32; // represented as pointer to stack
},
.Int, .Enum => blk: {
- const info = ty.intInfo(target);
+ const info = ty.intInfo(mod);
if (info.bits <= 32) break :blk wasm.Valtype.i32;
if (info.bits > 32 and info.bits <= 128) break :blk wasm.Valtype.i64;
break :blk wasm.Valtype.i32; // represented as pointer to stack
},
- .Struct => switch (ty.containerLayout()) {
+ .Struct => switch (ty.containerLayout(mod)) {
.Packed => {
- const struct_obj = ty.castTag(.@"struct").?.data;
- return typeToValtype(struct_obj.backing_int_ty, target);
+ const struct_obj = mod.typeToStruct(ty).?;
+ return typeToValtype(struct_obj.backing_int_ty, mod);
},
else => wasm.Valtype.i32,
},
- .Vector => switch (determineSimdStoreStrategy(ty, target)) {
+ .Vector => switch (determineSimdStoreStrategy(ty, mod)) {
.direct => wasm.Valtype.v128,
.unrolled => wasm.Valtype.i32,
},
+ .Union => switch (ty.containerLayout(mod)) {
+ .Packed => {
+ const int_ty = mod.intType(.unsigned, @intCast(u16, ty.bitSize(mod))) catch @panic("out of memory");
+ return typeToValtype(int_ty, mod);
+ },
+ else => wasm.Valtype.i32,
+ },
else => wasm.Valtype.i32, // all represented as reference/immediate
};
}
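
As a sketch, the integer branch above collapses Zig's arbitrary bit widths onto just two wasm value types (assumed behavior, mirroring the code):

const Valtype = enum { i32, i64 };

fn intValtype(bits: u16) Valtype {
    if (bits <= 32) return .i32;
    if (bits <= 128) return .i64;
    return .i32; // wider integers are represented as a pointer into the stack
}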
/// Using a given `Type`, returns the byte representation of its wasm value type
-fn genValtype(ty: Type, target: std.Target) u8 {
- return wasm.valtype(typeToValtype(ty, target));
+fn genValtype(ty: Type, mod: *Module) u8 {
+ return wasm.valtype(typeToValtype(ty, mod));
}
/// Using a given `Type`, returns the corresponding wasm value type
/// Differently from `genValtype` this also allows `void` to create a block
/// with no return type
-fn genBlockType(ty: Type, target: std.Target) u8 {
- return switch (ty.tag()) {
- .void, .noreturn => wasm.block_empty,
- else => genValtype(ty, target),
+fn genBlockType(ty: Type, mod: *Module) u8 {
+ return switch (ty.ip_index) {
+ .void_type, .noreturn_type => wasm.block_empty,
+ else => genValtype(ty, mod),
};
}
/// Writes the bytecode depending on the given `WValue` in `val`
fn emitWValue(func: *CodeGen, value: WValue) InnerError!void {
switch (value) {
+ .dead => unreachable, // reference to freed `WValue` (missing reuseOperand?)
.none, .stack => {}, // no-op
.local => |idx| try func.addLabel(.local_get, idx.value),
.imm32 => |val| try func.addImm32(@bitCast(i32, val)),
@@ -1079,30 +1101,31 @@ fn getResolvedInst(func: *CodeGen, ref: Air.Inst.Ref) *WValue {
/// Creates one local for a given `Type`.
/// Returns a corresponding `WValue` with `local` as active tag
fn allocLocal(func: *CodeGen, ty: Type) InnerError!WValue {
- const valtype = typeToValtype(ty, func.target);
+ const mod = func.bin_file.base.options.module.?;
+ const valtype = typeToValtype(ty, mod);
switch (valtype) {
.i32 => if (func.free_locals_i32.popOrNull()) |index| {
- log.debug("reusing local ({d}) of type {}\n", .{ index, valtype });
+ log.debug("reusing local ({d}) of type {}", .{ index, valtype });
return WValue{ .local = .{ .value = index, .references = 1 } };
},
.i64 => if (func.free_locals_i64.popOrNull()) |index| {
- log.debug("reusing local ({d}) of type {}\n", .{ index, valtype });
+ log.debug("reusing local ({d}) of type {}", .{ index, valtype });
return WValue{ .local = .{ .value = index, .references = 1 } };
},
.f32 => if (func.free_locals_f32.popOrNull()) |index| {
- log.debug("reusing local ({d}) of type {}\n", .{ index, valtype });
+ log.debug("reusing local ({d}) of type {}", .{ index, valtype });
return WValue{ .local = .{ .value = index, .references = 1 } };
},
.f64 => if (func.free_locals_f64.popOrNull()) |index| {
- log.debug("reusing local ({d}) of type {}\n", .{ index, valtype });
+ log.debug("reusing local ({d}) of type {}", .{ index, valtype });
return WValue{ .local = .{ .value = index, .references = 1 } };
},
.v128 => if (func.free_locals_v128.popOrNull()) |index| {
- log.debug("reusing local ({d}) of type {}\n", .{ index, valtype });
+ log.debug("reusing local ({d}) of type {}", .{ index, valtype });
return WValue{ .local = .{ .value = index, .references = 1 } };
},
}
- log.debug("new local of type {}\n", .{valtype});
+ log.debug("new local of type {}", .{valtype});
// no local was free to be re-used, so allocate a new local instead
return func.ensureAllocLocal(ty);
}
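
The reuse strategy is a simple per-valtype free list: pop a previously freed index if one exists, otherwise declare a new local. A standalone sketch (illustrative names):

const std = @import("std");

fn takeOrAllocate(free_list: *std.ArrayList(u32), next_index: *u32) u32 {
    if (free_list.popOrNull()) |index| return index; // reuse a freed local
    const index = next_index.*;
    next_index.* += 1; // otherwise declare a brand-new local
    return index;
}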
@@ -1110,7 +1133,8 @@ fn allocLocal(func: *CodeGen, ty: Type) InnerError!WValue {
/// Ensures a new local will be created. This is useful when
/// a zero-initialized local is needed.
fn ensureAllocLocal(func: *CodeGen, ty: Type) InnerError!WValue {
- try func.locals.append(func.gpa, genValtype(ty, func.target));
+ const mod = func.bin_file.base.options.module.?;
+ try func.locals.append(func.gpa, genValtype(ty, mod));
const initial_index = func.local_index;
func.local_index += 1;
return WValue{ .local = .{ .value = initial_index, .references = 1 } };
@@ -1118,48 +1142,55 @@ fn ensureAllocLocal(func: *CodeGen, ty: Type) InnerError!WValue {
/// Generates a `wasm.Type` from a given function type.
/// Memory is owned by the caller.
-fn genFunctype(gpa: Allocator, cc: std.builtin.CallingConvention, params: []const Type, return_type: Type, target: std.Target) !wasm.Type {
+fn genFunctype(
+ gpa: Allocator,
+ cc: std.builtin.CallingConvention,
+ params: []const InternPool.Index,
+ return_type: Type,
+ mod: *Module,
+) !wasm.Type {
var temp_params = std.ArrayList(wasm.Valtype).init(gpa);
defer temp_params.deinit();
var returns = std.ArrayList(wasm.Valtype).init(gpa);
defer returns.deinit();
- if (firstParamSRet(cc, return_type, target)) {
+ if (firstParamSRet(cc, return_type, mod)) {
try temp_params.append(.i32); // memory address is always a 32-bit handle
- } else if (return_type.hasRuntimeBitsIgnoreComptime()) {
+ } else if (return_type.hasRuntimeBitsIgnoreComptime(mod)) {
if (cc == .C) {
- const res_classes = abi.classifyType(return_type, target);
+ const res_classes = abi.classifyType(return_type, mod);
assert(res_classes[0] == .direct and res_classes[1] == .none);
- const scalar_type = abi.scalarType(return_type, target);
- try returns.append(typeToValtype(scalar_type, target));
+ const scalar_type = abi.scalarType(return_type, mod);
+ try returns.append(typeToValtype(scalar_type, mod));
} else {
- try returns.append(typeToValtype(return_type, target));
+ try returns.append(typeToValtype(return_type, mod));
}
- } else if (return_type.isError()) {
+ } else if (return_type.isError(mod)) {
try returns.append(.i32);
}
// param types
- for (params) |param_type| {
- if (!param_type.hasRuntimeBitsIgnoreComptime()) continue;
+ for (params) |param_type_ip| {
+ const param_type = param_type_ip.toType();
+ if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue;
switch (cc) {
.C => {
- const param_classes = abi.classifyType(param_type, target);
+ const param_classes = abi.classifyType(param_type, mod);
for (param_classes) |class| {
if (class == .none) continue;
if (class == .direct) {
- const scalar_type = abi.scalarType(param_type, target);
- try temp_params.append(typeToValtype(scalar_type, target));
+ const scalar_type = abi.scalarType(param_type, mod);
+ try temp_params.append(typeToValtype(scalar_type, mod));
} else {
- try temp_params.append(typeToValtype(param_type, target));
+ try temp_params.append(typeToValtype(param_type, mod));
}
}
},
- else => if (isByRef(param_type, target))
+ else => if (isByRef(param_type, mod))
try temp_params.append(.i32)
else
- try temp_params.append(typeToValtype(param_type, target)),
+ try temp_params.append(typeToValtype(param_type, mod)),
}
}
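
To illustrate the sret rule (assumed lowering, following the branches above): an aggregate return under the unspecified calling convention becomes a leading i32 pointer parameter and the wasm type loses its result, while scalars stay direct.

const Valtype = enum { i32, i64, f32, f64, v128 };

// fn make() [4]u64 -- returned through a leading sret pointer:
const make_params = [_]Valtype{.i32};
const make_returns = [_]Valtype{};

// fn add(a: u32, b: u32) u32 -- scalars are passed and returned directly:
const add_params = [_]Valtype{ .i32, .i32 };
const add_returns = [_]Valtype{.i32};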
@@ -1172,20 +1203,22 @@ fn genFunctype(gpa: Allocator, cc: std.builtin.CallingConvention, params: []cons
pub fn generate(
bin_file: *link.File,
src_loc: Module.SrcLoc,
- func: *Module.Fn,
+ func_index: Module.Fn.Index,
air: Air,
liveness: Liveness,
code: *std.ArrayList(u8),
debug_output: codegen.DebugInfoOutput,
) codegen.CodeGenError!codegen.Result {
_ = src_loc;
+ const mod = bin_file.options.module.?;
+ const func = mod.funcPtr(func_index);
var code_gen: CodeGen = .{
.gpa = bin_file.allocator,
.air = air,
.liveness = liveness,
.code = code,
.decl_index = func.owner_decl,
- .decl = bin_file.options.module.?.declPtr(func.owner_decl),
+ .decl = mod.declPtr(func.owner_decl),
.err_msg = undefined,
.locals = .{},
.target = bin_file.options.target,
@@ -1204,8 +1237,9 @@ pub fn generate(
}
fn genFunc(func: *CodeGen) InnerError!void {
- const fn_info = func.decl.ty.fnInfo();
- var func_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type, func.target);
+ const mod = func.bin_file.base.options.module.?;
+ const fn_info = mod.typeToFunc(func.decl.ty).?;
+ var func_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type.toType(), mod);
defer func_type.deinit(func.gpa);
_ = try func.bin_file.storeDeclType(func.decl_index, func_type);
@@ -1222,6 +1256,7 @@ fn genFunc(func: *CodeGen) InnerError!void {
defer {
var outer_branch = func.branches.pop();
outer_branch.deinit(func.gpa);
+ assert(func.branches.items.len == 0); // missing branch merge
}
// Generate MIR for function body
try func.genBody(func.air.getMainBody());
@@ -1230,8 +1265,8 @@ fn genFunc(func: *CodeGen) InnerError!void {
// we emit an unreachable instruction to tell the stack validator that part will never be reached.
if (func_type.returns.len != 0 and func.air.instructions.len > 0) {
const inst = @intCast(u32, func.air.instructions.len - 1);
- const last_inst_ty = func.air.typeOfIndex(inst);
- if (!last_inst_ty.hasRuntimeBitsIgnoreComptime() or last_inst_ty.isNoReturn()) {
+ const last_inst_ty = func.typeOfIndex(inst);
+ if (!last_inst_ty.hasRuntimeBitsIgnoreComptime(mod) or last_inst_ty.isNoReturn(mod)) {
try func.addTag(.@"unreachable");
}
}
@@ -1242,7 +1277,7 @@ fn genFunc(func: *CodeGen) InnerError!void {
// check if we have to initialize and allocate anything into the stack frame.
// If so, create enough stack space and insert the instructions at the front of the list.
- if (func.stack_size > 0) {
+ if (func.initial_stack_value != .none) {
var prologue = std.ArrayList(Mir.Inst).init(func.gpa);
defer prologue.deinit();
@@ -1251,7 +1286,7 @@ fn genFunc(func: *CodeGen) InnerError!void {
// store stack pointer so we can restore it when we return from the function
try prologue.append(.{ .tag = .local_tee, .data = .{ .label = func.initial_stack_value.local.value } });
// get the total stack size
- const aligned_stack = std.mem.alignForwardGeneric(u32, func.stack_size, func.stack_alignment);
+ const aligned_stack = std.mem.alignForward(u32, func.stack_size, func.stack_alignment);
try prologue.append(.{ .tag = .i32_const, .data = .{ .imm32 = @intCast(i32, aligned_stack) } });
// subtract it from the current stack pointer
try prologue.append(.{ .tag = .i32_sub, .data = .{ .tag = {} } });
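
The prologue is morally equivalent to the following sketch, where `stack_pointer` stands in for the linker-provided `__stack_pointer` global and the wasm shadow stack grows downwards (an assumption of this sketch, not spelled out by the diff):

const std = @import("std");

fn prologueSketch(stack_pointer: *u32, frame_size: u32, alignment: u32) u32 {
    const initial_sp = stack_pointer.*; // local.tee: saved so the epilogue can restore it
    const aligned = std.mem.alignForward(u32, frame_size, alignment);
    stack_pointer.* = initial_sp - aligned; // i32.sub: carve out the frame
    return initial_sp;
}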
@@ -1312,10 +1347,9 @@ const CallWValues = struct {
};
fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWValues {
- const cc = fn_ty.fnCallingConvention();
- const param_types = try func.gpa.alloc(Type, fn_ty.fnParamLen());
- defer func.gpa.free(param_types);
- fn_ty.fnParamTypes(param_types);
+ const mod = func.bin_file.base.options.module.?;
+ const fn_info = mod.typeToFunc(fn_ty).?;
+ const cc = fn_info.cc;
var result: CallWValues = .{
.args = &.{},
.return_value = .none,
@@ -1327,8 +1361,7 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV
// Check if we store the result as a pointer to the stack rather than
// by value
- const fn_info = fn_ty.fnInfo();
- if (firstParamSRet(fn_info.cc, fn_info.return_type, func.target)) {
+ if (firstParamSRet(fn_info.cc, fn_info.return_type.toType(), mod)) {
// the sret arg will be passed as first argument, therefore we
// set the `return_value` before allocating locals for regular args.
result.return_value = .{ .local = .{ .value = func.local_index, .references = 1 } };
@@ -1337,8 +1370,8 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV
switch (cc) {
.Unspecified => {
- for (param_types) |ty| {
- if (!ty.hasRuntimeBitsIgnoreComptime()) {
+ for (fn_info.param_types) |ty| {
+ if (!ty.toType().hasRuntimeBitsIgnoreComptime(mod)) {
continue;
}
@@ -1347,8 +1380,8 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV
}
},
.C => {
- for (param_types) |ty| {
- const ty_classes = abi.classifyType(ty, func.target);
+ for (fn_info.param_types) |ty| {
+ const ty_classes = abi.classifyType(ty.toType(), mod);
for (ty_classes) |class| {
if (class == .none) continue;
try args.append(.{ .local = .{ .value = func.local_index, .references = 1 } });
@@ -1362,11 +1395,11 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV
return result;
}
-fn firstParamSRet(cc: std.builtin.CallingConvention, return_type: Type, target: std.Target) bool {
+fn firstParamSRet(cc: std.builtin.CallingConvention, return_type: Type, mod: *Module) bool {
switch (cc) {
- .Unspecified, .Inline => return isByRef(return_type, target),
+ .Unspecified, .Inline => return isByRef(return_type, mod),
.C => {
- const ty_classes = abi.classifyType(return_type, target);
+ const ty_classes = abi.classifyType(return_type, mod);
if (ty_classes[0] == .indirect) return true;
if (ty_classes[0] == .direct and ty_classes[1] == .direct) return true;
return false;
@@ -1382,16 +1415,17 @@ fn lowerArg(func: *CodeGen, cc: std.builtin.CallingConvention, ty: Type, value:
return func.lowerToStack(value);
}
- const ty_classes = abi.classifyType(ty, func.target);
+ const mod = func.bin_file.base.options.module.?;
+ const ty_classes = abi.classifyType(ty, mod);
assert(ty_classes[0] != .none);
- switch (ty.zigTypeTag()) {
+ switch (ty.zigTypeTag(mod)) {
.Struct, .Union => {
if (ty_classes[0] == .indirect) {
return func.lowerToStack(value);
}
assert(ty_classes[0] == .direct);
- const scalar_type = abi.scalarType(ty, func.target);
- const abi_size = scalar_type.abiSize(func.target);
+ const scalar_type = abi.scalarType(ty, mod);
+ const abi_size = scalar_type.abiSize(mod);
try func.emitWValue(value);
// When the value lives in the virtual stack, we must load it onto the actual stack
@@ -1399,12 +1433,12 @@ fn lowerArg(func: *CodeGen, cc: std.builtin.CallingConvention, ty: Type, value:
const opcode = buildOpcode(.{
.op = .load,
.width = @intCast(u8, abi_size),
- .signedness = if (scalar_type.isSignedInt()) .signed else .unsigned,
- .valtype1 = typeToValtype(scalar_type, func.target),
+ .signedness = if (scalar_type.isSignedInt(mod)) .signed else .unsigned,
+ .valtype1 = typeToValtype(scalar_type, mod),
});
try func.addMemArg(Mir.Inst.Tag.fromOpcode(opcode), .{
.offset = value.offset(),
- .alignment = scalar_type.abiAlignment(func.target),
+ .alignment = scalar_type.abiAlignment(mod),
});
}
},
@@ -1413,7 +1447,7 @@ fn lowerArg(func: *CodeGen, cc: std.builtin.CallingConvention, ty: Type, value:
return func.lowerToStack(value);
}
assert(ty_classes[0] == .direct and ty_classes[1] == .direct);
- assert(ty.abiSize(func.target) == 16);
+ assert(ty.abiSize(mod) == 16);
// in this case we have an integer or float that must be lowered as 2 i64's.
try func.emitWValue(value);
try func.addMemArg(.i64_load, .{ .offset = value.offset(), .alignment = 8 });
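
A sketch of what those two loads carry: a 16-byte scalar passed "direct" under the C ABI travels as two i64 halves read from adjacent memory (little-endian order assumed):

fn splitU128(value: u128) [2]u64 {
    return .{ @truncate(u64, value), @truncate(u64, value >> 64) };
}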
@@ -1480,24 +1514,24 @@ fn restoreStackPointer(func: *CodeGen) !void {
///
/// Asserts `Type` has codegen bits
fn allocStack(func: *CodeGen, ty: Type) !WValue {
- assert(ty.hasRuntimeBitsIgnoreComptime());
+ const mod = func.bin_file.base.options.module.?;
+ assert(ty.hasRuntimeBitsIgnoreComptime(mod));
if (func.initial_stack_value == .none) {
try func.initializeStack();
}
- const abi_size = std.math.cast(u32, ty.abiSize(func.target)) orelse {
- const module = func.bin_file.base.options.module.?;
+ const abi_size = std.math.cast(u32, ty.abiSize(mod)) orelse {
return func.fail("Type {} with ABI size of {d} exceeds stack frame size", .{
- ty.fmt(module), ty.abiSize(func.target),
+ ty.fmt(mod), ty.abiSize(mod),
});
};
- const abi_align = ty.abiAlignment(func.target);
+ const abi_align = ty.abiAlignment(mod);
if (abi_align > func.stack_alignment) {
func.stack_alignment = abi_align;
}
- const offset = std.mem.alignForwardGeneric(u32, func.stack_size, abi_align);
+ const offset = std.mem.alignForward(u32, func.stack_size, abi_align);
defer func.stack_size = offset + abi_size;
return WValue{ .stack_offset = .{ .value = offset, .references = 1 } };
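
Frame slots are handed out bump-allocator style: round the running frame size up to the type's ABI alignment, then reserve `abi_size` bytes. A sketch of the same arithmetic (the `defer` above performs the final line):

const std = @import("std");

fn allocSlot(stack_size: *u32, abi_size: u32, abi_align: u32) u32 {
    const offset = std.mem.alignForward(u32, stack_size.*, abi_align);
    stack_size.* = offset + abi_size;
    return offset; // becomes the WValue's stack_offset
}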
@@ -1508,29 +1542,29 @@ fn allocStack(func: *CodeGen, ty: Type) !WValue {
/// This is different from allocStack where this will use the pointer's alignment
/// if it is set, to ensure the stack alignment will be set correctly.
fn allocStackPtr(func: *CodeGen, inst: Air.Inst.Index) !WValue {
- const ptr_ty = func.air.typeOfIndex(inst);
- const pointee_ty = ptr_ty.childType();
+ const mod = func.bin_file.base.options.module.?;
+ const ptr_ty = func.typeOfIndex(inst);
+ const pointee_ty = ptr_ty.childType(mod);
if (func.initial_stack_value == .none) {
try func.initializeStack();
}
- if (!pointee_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!pointee_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return func.allocStack(Type.usize); // create a value containing just the stack pointer.
}
- const abi_alignment = ptr_ty.ptrAlignment(func.target);
- const abi_size = std.math.cast(u32, pointee_ty.abiSize(func.target)) orelse {
- const module = func.bin_file.base.options.module.?;
+ const abi_alignment = ptr_ty.ptrAlignment(mod);
+ const abi_size = std.math.cast(u32, pointee_ty.abiSize(mod)) orelse {
return func.fail("Type {} with ABI size of {d} exceeds stack frame size", .{
- pointee_ty.fmt(module), pointee_ty.abiSize(func.target),
+ pointee_ty.fmt(mod), pointee_ty.abiSize(mod),
});
};
if (abi_alignment > func.stack_alignment) {
func.stack_alignment = abi_alignment;
}
- const offset = std.mem.alignForwardGeneric(u32, func.stack_size, abi_alignment);
+ const offset = std.mem.alignForward(u32, func.stack_size, abi_alignment);
defer func.stack_size = offset + abi_size;
return WValue{ .stack_offset = .{ .value = offset, .references = 1 } };
@@ -1593,10 +1627,16 @@ fn memcpy(func: *CodeGen, dst: WValue, src: WValue, len: WValue) !void {
else => {},
}
- // TODO: We should probably lower this to a call to compiler_rt
- // But for now, we implement it manually
- var offset = try func.ensureAllocLocal(Type.usize); // local for counter
+ // allocate a local for the offset, and set it to 0.
+ // This ensures that inside loops we correctly reset the counter.
+ var offset = try func.allocLocal(Type.usize); // local for counter
defer offset.free(func);
+ switch (func.arch()) {
+ .wasm32 => try func.addImm32(0),
+ .wasm64 => try func.addImm64(0),
+ else => unreachable,
+ }
+ try func.addLabel(.local_set, offset.local.value);
// outer block to jump to when loop is done
try func.startBlock(.block, wasm.block_empty);
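
The emitted blocks and branches implement a plain byte-copy loop; resetting the counter local on entry matters because the same local may be re-entered when the memcpy sits inside a loop. A scalar sketch of the generated logic:

fn memcpySketch(dst: [*]u8, src: [*]const u8, len: usize) void {
    var offset: usize = 0; // explicitly reset on every entry
    while (offset < len) : (offset += 1) {
        dst[offset] = src[offset];
    }
}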
@@ -1666,7 +1706,7 @@ fn memcpy(func: *CodeGen, dst: WValue, src: WValue, len: WValue) !void {
}
fn ptrSize(func: *const CodeGen) u16 {
- return @divExact(func.target.cpu.arch.ptrBitWidth(), 8);
+ return @divExact(func.target.ptrBitWidth(), 8);
}
fn arch(func: *const CodeGen) std.Target.Cpu.Arch {
@@ -1675,8 +1715,9 @@ fn arch(func: *const CodeGen) std.Target.Cpu.Arch {
/// For a given `Type`, will return true when the type will be passed
/// by reference, rather than by value
-fn isByRef(ty: Type, target: std.Target) bool {
- switch (ty.zigTypeTag()) {
+fn isByRef(ty: Type, mod: *Module) bool {
+ const target = mod.getTarget();
+ switch (ty.zigTypeTag(mod)) {
.Type,
.ComptimeInt,
.ComptimeFloat,
@@ -1697,37 +1738,42 @@ fn isByRef(ty: Type, target: std.Target) bool {
.Array,
.Frame,
- .Union,
- => return ty.hasRuntimeBitsIgnoreComptime(),
+ => return ty.hasRuntimeBitsIgnoreComptime(mod),
+ .Union => {
+ if (mod.typeToUnion(ty)) |union_obj| {
+ if (union_obj.layout == .Packed) {
+ return ty.abiSize(mod) > 8;
+ }
+ }
+ return ty.hasRuntimeBitsIgnoreComptime(mod);
+ },
.Struct => {
- if (ty.castTag(.@"struct")) |struct_ty| {
- const struct_obj = struct_ty.data;
+ if (mod.typeToStruct(ty)) |struct_obj| {
if (struct_obj.layout == .Packed and struct_obj.haveFieldTypes()) {
- return isByRef(struct_obj.backing_int_ty, target);
+ return isByRef(struct_obj.backing_int_ty, mod);
}
}
- return ty.hasRuntimeBitsIgnoreComptime();
+ return ty.hasRuntimeBitsIgnoreComptime(mod);
},
- .Vector => return determineSimdStoreStrategy(ty, target) == .unrolled,
- .Int => return ty.intInfo(target).bits > 64,
+ .Vector => return determineSimdStoreStrategy(ty, mod) == .unrolled,
+ .Int => return ty.intInfo(mod).bits > 64,
.Float => return ty.floatBits(target) > 64,
.ErrorUnion => {
- const pl_ty = ty.errorUnionPayload();
- if (!pl_ty.hasRuntimeBitsIgnoreComptime()) {
+ const pl_ty = ty.errorUnionPayload(mod);
+ if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return false;
}
return true;
},
.Optional => {
- if (ty.isPtrLikeOptional()) return false;
- var buf: Type.Payload.ElemType = undefined;
- const pl_type = ty.optionalChild(&buf);
- if (pl_type.zigTypeTag() == .ErrorSet) return false;
- return pl_type.hasRuntimeBitsIgnoreComptime();
+ if (ty.isPtrLikeOptional(mod)) return false;
+ const pl_type = ty.optionalChild(mod);
+ if (pl_type.zigTypeTag(mod) == .ErrorSet) return false;
+ return pl_type.hasRuntimeBitsIgnoreComptime(mod);
},
.Pointer => {
// Slices act like struct and will be passed by reference
- if (ty.isSlice()) return true;
+ if (ty.isSlice(mod)) return true;
return false;
},
}
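
Assumed examples of how these rules classify a few common types (a sketch, not an exhaustive table from the commit):

// by value: fits in a single wasm local
const by_value_examples = .{ u64, f64, *u8, ?*u8 };
// by reference: lowered as a pointer into the caller's frame
const by_ref_examples = .{ u128, [4]u8, []const u8 };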
@@ -1742,10 +1788,11 @@ const SimdStoreStrategy = enum {
/// This means when a given type is 128 bits and either the simd128 or relaxed-simd
/// features are enabled, the function will return `.direct`. This allows storing
/// it with a single instruction, rather than with an unrolled version.
-fn determineSimdStoreStrategy(ty: Type, target: std.Target) SimdStoreStrategy {
- std.debug.assert(ty.zigTypeTag() == .Vector);
- if (ty.bitSize(target) != 128) return .unrolled;
+fn determineSimdStoreStrategy(ty: Type, mod: *Module) SimdStoreStrategy {
+ std.debug.assert(ty.zigTypeTag(mod) == .Vector);
+ if (ty.bitSize(mod) != 128) return .unrolled;
const hasFeature = std.Target.wasm.featureSetHas;
+ const target = mod.getTarget();
const features = target.cpu.features;
if (hasFeature(features, .relaxed_simd) or hasFeature(features, .simd128)) {
return .direct;
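
The same feature test can be written standalone against `std.Target` (assuming a wasm target; `featureSetHas` is the generated std API used above):

const std = @import("std");

fn useDirectStores(target: std.Target) bool {
    const has = std.Target.wasm.featureSetHas;
    return has(target.cpu.features, .simd128) or
        has(target.cpu.features, .relaxed_simd);
}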
@@ -1785,8 +1832,7 @@ fn buildPointerOffset(func: *CodeGen, ptr_value: WValue, offset: u64, action: en
fn genInst(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const air_tags = func.air.instructions.items(.tag);
return switch (air_tags[inst]) {
- .constant => unreachable,
- .const_ty => unreachable,
+ .inferred_alloc, .inferred_alloc_comptime, .interned => unreachable,
.add => func.airBinOp(inst, .add),
.add_sat => func.airSatBinOp(inst, .add),
@@ -1796,10 +1842,8 @@ fn genInst(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
.subwrap => func.airWrapBinOp(inst, .sub),
.mul => func.airBinOp(inst, .mul),
.mulwrap => func.airWrapBinOp(inst, .mul),
- .div_float,
- .div_exact,
- .div_trunc,
- => func.airDiv(inst),
+ .div_float, .div_exact => func.airDiv(inst),
+ .div_trunc => func.airDivTrunc(inst),
.div_floor => func.airDivFloor(inst),
.bit_and => func.airBinOp(inst, .@"and"),
.bit_or => func.airBinOp(inst, .@"or"),
@@ -1963,11 +2007,11 @@ fn genInst(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
.tag_name => func.airTagName(inst),
.error_set_has_value => func.airErrorSetHasValue(inst),
+ .frame_addr => func.airFrameAddress(inst),
.mul_sat,
.mod,
.assembly,
- .frame_addr,
.bit_reverse,
.is_err_ptr,
.is_non_err_ptr,
@@ -2028,8 +2072,11 @@ fn genInst(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn genBody(func: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
+ const ip = &mod.intern_pool;
+
for (body) |inst| {
- if (func.liveness.isUnused(inst) and !func.air.mustLower(inst)) {
+ if (func.liveness.isUnused(inst) and !func.air.mustLower(inst, ip)) {
continue;
}
const old_bookkeeping_value = func.air_bookkeeping;
@@ -2046,36 +2093,37 @@ fn genBody(func: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
}
fn airRet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const un_op = func.air.instructions.items(.data)[inst].un_op;
const operand = try func.resolveInst(un_op);
- const fn_info = func.decl.ty.fnInfo();
- const ret_ty = fn_info.return_type;
+ const fn_info = mod.typeToFunc(func.decl.ty).?;
+ const ret_ty = fn_info.return_type.toType();
// result must be stored in the stack and we return a pointer
// to the stack instead
if (func.return_value != .none) {
try func.store(func.return_value, operand, ret_ty, 0);
- } else if (fn_info.cc == .C and ret_ty.hasRuntimeBitsIgnoreComptime()) {
- switch (ret_ty.zigTypeTag()) {
+ } else if (fn_info.cc == .C and ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ switch (ret_ty.zigTypeTag(mod)) {
// Aggregate types can be lowered as a singular value
.Struct, .Union => {
- const scalar_type = abi.scalarType(ret_ty, func.target);
+ const scalar_type = abi.scalarType(ret_ty, mod);
try func.emitWValue(operand);
const opcode = buildOpcode(.{
.op = .load,
- .width = @intCast(u8, scalar_type.abiSize(func.target) * 8),
- .signedness = if (scalar_type.isSignedInt()) .signed else .unsigned,
- .valtype1 = typeToValtype(scalar_type, func.target),
+ .width = @intCast(u8, scalar_type.abiSize(mod) * 8),
+ .signedness = if (scalar_type.isSignedInt(mod)) .signed else .unsigned,
+ .valtype1 = typeToValtype(scalar_type, mod),
});
try func.addMemArg(Mir.Inst.Tag.fromOpcode(opcode), .{
.offset = operand.offset(),
- .alignment = scalar_type.abiAlignment(func.target),
+ .alignment = scalar_type.abiAlignment(mod),
});
},
else => try func.emitWValue(operand),
}
} else {
- if (!ret_ty.hasRuntimeBitsIgnoreComptime() and ret_ty.isError()) {
+ if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod) and ret_ty.isError(mod)) {
try func.addImm32(0);
} else {
try func.emitWValue(operand);
@@ -2088,15 +2136,16 @@ fn airRet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airRetPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const child_type = func.air.typeOfIndex(inst).childType();
+ const mod = func.bin_file.base.options.module.?;
+ const child_type = func.typeOfIndex(inst).childType(mod);
var result = result: {
- if (!child_type.isFnOrHasRuntimeBitsIgnoreComptime()) {
+ if (!child_type.isFnOrHasRuntimeBitsIgnoreComptime(mod)) {
break :result try func.allocStack(Type.usize); // create pointer to void
}
- const fn_info = func.decl.ty.fnInfo();
- if (firstParamSRet(fn_info.cc, fn_info.return_type, func.target)) {
+ const fn_info = mod.typeToFunc(func.decl.ty).?;
+ if (firstParamSRet(fn_info.cc, fn_info.return_type.toType(), mod)) {
break :result func.return_value;
}
@@ -2107,19 +2156,17 @@ fn airRetPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airRetLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const un_op = func.air.instructions.items(.data)[inst].un_op;
const operand = try func.resolveInst(un_op);
- const ret_ty = func.air.typeOf(un_op).childType();
- if (!ret_ty.hasRuntimeBitsIgnoreComptime()) {
- if (ret_ty.isError()) {
+ const ret_ty = func.typeOf(un_op).childType(mod);
+
+ const fn_info = mod.typeToFunc(func.decl.ty).?;
+ if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (ret_ty.isError(mod)) {
try func.addImm32(0);
- } else {
- return func.finishAir(inst, .none, &.{});
}
- }
-
- const fn_info = func.decl.ty.fnInfo();
- if (!firstParamSRet(fn_info.cc, fn_info.return_type, func.target)) {
+ } else if (!firstParamSRet(fn_info.cc, fn_info.return_type.toType(), mod)) {
// leave on the stack
_ = try func.load(operand, ret_ty, 0);
}
@@ -2134,42 +2181,48 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
const pl_op = func.air.instructions.items(.data)[inst].pl_op;
const extra = func.air.extraData(Air.Call, pl_op.payload);
const args = @ptrCast([]const Air.Inst.Ref, func.air.extra[extra.end..][0..extra.data.args_len]);
- const ty = func.air.typeOf(pl_op.operand);
+ const ty = func.typeOf(pl_op.operand);
- const fn_ty = switch (ty.zigTypeTag()) {
+ const mod = func.bin_file.base.options.module.?;
+ const fn_ty = switch (ty.zigTypeTag(mod)) {
.Fn => ty,
- .Pointer => ty.childType(),
+ .Pointer => ty.childType(mod),
else => unreachable,
};
- const ret_ty = fn_ty.fnReturnType();
- const fn_info = fn_ty.fnInfo();
- const first_param_sret = firstParamSRet(fn_info.cc, fn_info.return_type, func.target);
+ const ret_ty = fn_ty.fnReturnType(mod);
+ const fn_info = mod.typeToFunc(fn_ty).?;
+ const first_param_sret = firstParamSRet(fn_info.cc, fn_info.return_type.toType(), mod);
const callee: ?Decl.Index = blk: {
- const func_val = func.air.value(pl_op.operand) orelse break :blk null;
- const module = func.bin_file.base.options.module.?;
-
- if (func_val.castTag(.function)) |function| {
- _ = try func.bin_file.getOrCreateAtomForDecl(function.data.owner_decl);
- break :blk function.data.owner_decl;
- } else if (func_val.castTag(.extern_fn)) |extern_fn| {
- const ext_decl = module.declPtr(extern_fn.data.owner_decl);
- const ext_info = ext_decl.ty.fnInfo();
- var func_type = try genFunctype(func.gpa, ext_info.cc, ext_info.param_types, ext_info.return_type, func.target);
+ const func_val = (try func.air.value(pl_op.operand, mod)) orelse break :blk null;
+
+ if (func_val.getFunction(mod)) |function| {
+ _ = try func.bin_file.getOrCreateAtomForDecl(function.owner_decl);
+ break :blk function.owner_decl;
+ } else if (func_val.getExternFunc(mod)) |extern_func| {
+ const ext_decl = mod.declPtr(extern_func.decl);
+ const ext_info = mod.typeToFunc(ext_decl.ty).?;
+ var func_type = try genFunctype(func.gpa, ext_info.cc, ext_info.param_types, ext_info.return_type.toType(), mod);
defer func_type.deinit(func.gpa);
- const atom_index = try func.bin_file.getOrCreateAtomForDecl(extern_fn.data.owner_decl);
+ const atom_index = try func.bin_file.getOrCreateAtomForDecl(extern_func.decl);
const atom = func.bin_file.getAtomPtr(atom_index);
- const type_index = try func.bin_file.storeDeclType(extern_fn.data.owner_decl, func_type);
+ const type_index = try func.bin_file.storeDeclType(extern_func.decl, func_type);
try func.bin_file.addOrUpdateImport(
- mem.sliceTo(ext_decl.name, 0),
+ mod.intern_pool.stringToSlice(ext_decl.name),
atom.getSymbolIndex().?,
- ext_decl.getExternFn().?.lib_name,
+ mod.intern_pool.stringToSliceUnwrap(ext_decl.getOwnedExternFunc(mod).?.lib_name),
type_index,
);
- break :blk extern_fn.data.owner_decl;
- } else if (func_val.castTag(.decl_ref)) |decl_ref| {
- _ = try func.bin_file.getOrCreateAtomForDecl(decl_ref.data);
- break :blk decl_ref.data;
+ break :blk extern_func.decl;
+ } else switch (mod.intern_pool.indexToKey(func_val.ip_index)) {
+ .ptr => |ptr| switch (ptr.addr) {
+ .decl => |decl| {
+ _ = try func.bin_file.getOrCreateAtomForDecl(decl);
+ break :blk decl;
+ },
+ else => {},
+ },
+ else => {},
}
return func.fail("Expected a function, but instead found type '{}'", .{func_val.tag()});
};
@@ -2183,10 +2236,10 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
for (args) |arg| {
const arg_val = try func.resolveInst(arg);
- const arg_ty = func.air.typeOf(arg);
- if (!arg_ty.hasRuntimeBitsIgnoreComptime()) continue;
+ const arg_ty = func.typeOf(arg);
+ if (!arg_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
- try func.lowerArg(fn_ty.fnInfo().cc, arg_ty, arg_val);
+ try func.lowerArg(mod.typeToFunc(fn_ty).?.cc, arg_ty, arg_val);
}
if (callee) |direct| {
@@ -2195,11 +2248,11 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
} else {
// in this case we call a function pointer
// so load its value onto the stack
- std.debug.assert(ty.zigTypeTag() == .Pointer);
+ std.debug.assert(ty.zigTypeTag(mod) == .Pointer);
const operand = try func.resolveInst(pl_op.operand);
try func.emitWValue(operand);
- var fn_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type, func.target);
+ var fn_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type.toType(), mod);
defer fn_type.deinit(func.gpa);
const fn_type_index = try func.bin_file.putOrGetFuncType(fn_type);
@@ -2207,18 +2260,18 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
}
const result_value = result_value: {
- if (!ret_ty.hasRuntimeBitsIgnoreComptime() and !ret_ty.isError()) {
+ if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod) and !ret_ty.isError(mod)) {
break :result_value WValue{ .none = {} };
- } else if (ret_ty.isNoReturn()) {
+ } else if (ret_ty.isNoReturn(mod)) {
try func.addTag(.@"unreachable");
break :result_value WValue{ .none = {} };
} else if (first_param_sret) {
break :result_value sret;
// TODO: Make this less fragile and optimize
- } else if (fn_ty.fnInfo().cc == .C and ret_ty.zigTypeTag() == .Struct or ret_ty.zigTypeTag() == .Union) {
+ } else if (mod.typeToFunc(fn_ty).?.cc == .C and ret_ty.zigTypeTag(mod) == .Struct or ret_ty.zigTypeTag(mod) == .Union) {
const result_local = try func.allocLocal(ret_ty);
try func.addLabel(.local_set, result_local.local.value);
- const scalar_type = abi.scalarType(ret_ty, func.target);
+ const scalar_type = abi.scalarType(ret_ty, mod);
const result = try func.allocStack(scalar_type);
try func.store(result, result_local, scalar_type, 0);
break :result_value result;
@@ -2241,6 +2294,7 @@ fn airAlloc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
if (safety) {
// TODO if the value is undef, write 0xaa bytes to dest
} else {
@@ -2250,26 +2304,22 @@ fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void
const lhs = try func.resolveInst(bin_op.lhs);
const rhs = try func.resolveInst(bin_op.rhs);
- const ptr_ty = func.air.typeOf(bin_op.lhs);
- const ptr_info = ptr_ty.ptrInfo().data;
- const ty = ptr_ty.childType();
+ const ptr_ty = func.typeOf(bin_op.lhs);
+ const ptr_info = ptr_ty.ptrInfo(mod);
+ const ty = ptr_ty.childType(mod);
if (ptr_info.host_size == 0) {
try func.store(lhs, rhs, ty, 0);
} else {
// at this point we have a non-natural alignment, we must
// load the value, and then shift+or the rhs into the result location.
- var int_ty_payload: Type.Payload.Bits = .{
- .base = .{ .tag = .int_unsigned },
- .data = ptr_info.host_size * 8,
- };
- const int_elem_ty = Type.initPayload(&int_ty_payload.base);
+ const int_elem_ty = try mod.intType(.unsigned, ptr_info.host_size * 8);
- if (isByRef(int_elem_ty, func.target)) {
+ if (isByRef(int_elem_ty, mod)) {
return func.fail("TODO: airStore for pointers to bitfields with backing type larger than 64bits", .{});
}
- var mask = @intCast(u64, (@as(u65, 1) << @intCast(u7, ty.bitSize(func.target))) - 1);
+ var mask = @intCast(u64, (@as(u65, 1) << @intCast(u7, ty.bitSize(mod))) - 1);
mask <<= @intCast(u6, ptr_info.bit_offset);
mask ^= ~@as(u64, 0);
const shift_val = if (ptr_info.host_size <= 4)
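
A worked example of the mask construction above, storing a 4-bit field at bit offset 4 inside an 8-bit host integer (values chosen for illustration; the real code uses u65 so a 64-bit field does not overflow the shift):

const std = @import("std");

test "bitfield store mask" {
    const bit_size: u6 = 4; // width of the stored value (a u4)
    const bit_offset: u6 = 4; // its position inside the host integer
    var mask: u64 = (@as(u64, 1) << bit_size) - 1; // 0b0000_1111
    mask <<= bit_offset; // 0b1111_0000
    mask ^= ~@as(u64, 0); // invert: zero at the field, one everywhere else
    const old: u64 = 0b1010_0110;
    const field: u64 = 0b1111;
    const result = (old & mask) | (field << bit_offset);
    try std.testing.expectEqual(@as(u64, 0b1111_0110), result);
}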
@@ -2298,39 +2348,40 @@ fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void
fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerError!void {
assert(!(lhs != .stack and rhs == .stack));
- switch (ty.zigTypeTag()) {
+ const mod = func.bin_file.base.options.module.?;
+ const abi_size = ty.abiSize(mod);
+ switch (ty.zigTypeTag(mod)) {
.ErrorUnion => {
- const pl_ty = ty.errorUnionPayload();
- if (!pl_ty.hasRuntimeBitsIgnoreComptime()) {
+ const pl_ty = ty.errorUnionPayload(mod);
+ if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return func.store(lhs, rhs, Type.anyerror, 0);
}
- const len = @intCast(u32, ty.abiSize(func.target));
+ const len = @intCast(u32, abi_size);
return func.memcpy(lhs, rhs, .{ .imm32 = len });
},
.Optional => {
- if (ty.isPtrLikeOptional()) {
+ if (ty.isPtrLikeOptional(mod)) {
return func.store(lhs, rhs, Type.usize, 0);
}
- var buf: Type.Payload.ElemType = undefined;
- const pl_ty = ty.optionalChild(&buf);
- if (!pl_ty.hasRuntimeBitsIgnoreComptime()) {
+ const pl_ty = ty.optionalChild(mod);
+ if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return func.store(lhs, rhs, Type.u8, 0);
}
- if (pl_ty.zigTypeTag() == .ErrorSet) {
+ if (pl_ty.zigTypeTag(mod) == .ErrorSet) {
return func.store(lhs, rhs, Type.anyerror, 0);
}
- const len = @intCast(u32, ty.abiSize(func.target));
+ const len = @intCast(u32, abi_size);
return func.memcpy(lhs, rhs, .{ .imm32 = len });
},
- .Struct, .Array, .Union => if (isByRef(ty, func.target)) {
- const len = @intCast(u32, ty.abiSize(func.target));
+ .Struct, .Array, .Union => if (isByRef(ty, mod)) {
+ const len = @intCast(u32, abi_size);
return func.memcpy(lhs, rhs, .{ .imm32 = len });
},
- .Vector => switch (determineSimdStoreStrategy(ty, func.target)) {
+ .Vector => switch (determineSimdStoreStrategy(ty, mod)) {
.unrolled => {
- const len = @intCast(u32, ty.abiSize(func.target));
+ const len = @intCast(u32, abi_size);
return func.memcpy(lhs, rhs, .{ .imm32 = len });
},
.direct => {
@@ -2342,13 +2393,13 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
try func.mir_extra.appendSlice(func.gpa, &[_]u32{
std.wasm.simdOpcode(.v128_store),
offset + lhs.offset(),
- ty.abiAlignment(func.target),
+ ty.abiAlignment(mod),
});
return func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
},
},
.Pointer => {
- if (ty.isSlice()) {
+ if (ty.isSlice(mod)) {
// store pointer first
// lower it to the stack so we do not have to store rhs into a local first
try func.emitWValue(lhs);
@@ -2362,7 +2413,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
return;
}
},
- .Int => if (ty.intInfo(func.target).bits > 64) {
+ .Int, .Float => if (abi_size > 8 and abi_size <= 16) {
try func.emitWValue(lhs);
const lsb = try func.load(rhs, Type.u64, 0);
try func.store(.{ .stack = {} }, lsb, Type.u64, 0 + lhs.offset());
@@ -2371,41 +2422,47 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
const msb = try func.load(rhs, Type.u64, 8);
try func.store(.{ .stack = {} }, msb, Type.u64, 8 + lhs.offset());
return;
+ } else if (abi_size > 16) {
+ try func.memcpy(lhs, rhs, .{ .imm32 = @intCast(u32, ty.abiSize(mod)) });
+ },
+ else => if (abi_size > 8) {
+ return func.fail("TODO: `store` for type `{}` with abisize `{d}`", .{
+ ty.fmt(func.bin_file.base.options.module.?),
+ abi_size,
+ });
},
- else => {},
}
try func.emitWValue(lhs);
// In this case we're actually interested in storing the stack position
// into lhs, so we calculate that and emit that instead
try func.lowerToStack(rhs);
- const valtype = typeToValtype(ty, func.target);
- const abi_size = @intCast(u8, ty.abiSize(func.target));
-
+ const valtype = typeToValtype(ty, mod);
const opcode = buildOpcode(.{
.valtype1 = valtype,
- .width = abi_size * 8,
+ .width = @intCast(u8, abi_size * 8),
.op = .store,
});
// store rhs value at stack pointer's location in memory
try func.addMemArg(
Mir.Inst.Tag.fromOpcode(opcode),
- .{ .offset = offset + lhs.offset(), .alignment = ty.abiAlignment(func.target) },
+ .{ .offset = offset + lhs.offset(), .alignment = ty.abiAlignment(mod) },
);
}
fn airLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
const operand = try func.resolveInst(ty_op.operand);
const ty = func.air.getRefType(ty_op.ty);
- const ptr_ty = func.air.typeOf(ty_op.operand);
- const ptr_info = ptr_ty.ptrInfo().data;
+ const ptr_ty = func.typeOf(ty_op.operand);
+ const ptr_info = ptr_ty.ptrInfo(mod);
- if (!ty.hasRuntimeBitsIgnoreComptime()) return func.finishAir(inst, .none, &.{ty_op.operand});
+ if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return func.finishAir(inst, .none, &.{ty_op.operand});
const result = result: {
- if (isByRef(ty, func.target)) {
+ if (isByRef(ty, mod)) {
const new_local = try func.allocStack(ty);
try func.store(new_local, operand, ty, 0);
break :result new_local;
@@ -2418,11 +2475,7 @@ fn airLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// at this point we have a non-natural alignment, we must
// shift the value to obtain the correct bit.
- var int_ty_payload: Type.Payload.Bits = .{
- .base = .{ .tag = .int_unsigned },
- .data = ptr_info.host_size * 8,
- };
- const int_elem_ty = Type.initPayload(&int_ty_payload.base);
+ const int_elem_ty = try mod.intType(.unsigned, ptr_info.host_size * 8);
const shift_val = if (ptr_info.host_size <= 4)
WValue{ .imm32 = ptr_info.bit_offset }
else if (ptr_info.host_size <= 8)
@@ -2442,25 +2495,26 @@ fn airLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
/// Loads an operand from the linear memory section.
/// NOTE: Leaves the value on the stack.
fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValue {
+ const mod = func.bin_file.base.options.module.?;
// load local's value from memory by its stack position
try func.emitWValue(operand);
- if (ty.zigTypeTag() == .Vector) {
+ if (ty.zigTypeTag(mod) == .Vector) {
// TODO: Add helper functions for simd opcodes
const extra_index = @intCast(u32, func.mir_extra.items.len);
// stores as := opcode, offset, alignment (opcode::memarg)
try func.mir_extra.appendSlice(func.gpa, &[_]u32{
std.wasm.simdOpcode(.v128_load),
offset + operand.offset(),
- ty.abiAlignment(func.target),
+ ty.abiAlignment(mod),
});
try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
return WValue{ .stack = {} };
}
- const abi_size = @intCast(u8, ty.abiSize(func.target));
+ const abi_size = @intCast(u8, ty.abiSize(mod));
const opcode = buildOpcode(.{
- .valtype1 = typeToValtype(ty, func.target),
+ .valtype1 = typeToValtype(ty, mod),
.width = abi_size * 8,
.op = .load,
.signedness = .unsigned,
@@ -2468,19 +2522,20 @@ fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValu
try func.addMemArg(
Mir.Inst.Tag.fromOpcode(opcode),
- .{ .offset = offset + operand.offset(), .alignment = ty.abiAlignment(func.target) },
+ .{ .offset = offset + operand.offset(), .alignment = ty.abiAlignment(mod) },
);
return WValue{ .stack = {} };
}
fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const arg_index = func.arg_index;
const arg = func.args[arg_index];
- const cc = func.decl.ty.fnInfo().cc;
- const arg_ty = func.air.typeOfIndex(inst);
+ const cc = mod.typeToFunc(func.decl.ty).?.cc;
+ const arg_ty = func.typeOfIndex(inst);
if (cc == .C) {
- const arg_classes = abi.classifyType(arg_ty, func.target);
+ const arg_classes = abi.classifyType(arg_ty, mod);
for (arg_classes) |class| {
if (class != .none) {
func.arg_index += 1;
@@ -2490,7 +2545,7 @@ fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// When we have an argument that's passed using more than a single parameter,
// we combine them into a single stack value
if (arg_classes[0] == .direct and arg_classes[1] == .direct) {
- if (arg_ty.zigTypeTag() != .Int and arg_ty.zigTypeTag() != .Float) {
+ if (arg_ty.zigTypeTag(mod) != .Int and arg_ty.zigTypeTag(mod) != .Float) {
return func.fail(
"TODO: Implement C-ABI argument for type '{}'",
.{arg_ty.fmt(func.bin_file.base.options.module.?)},
@@ -2520,18 +2575,44 @@ fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const bin_op = func.air.instructions.items(.data)[inst].bin_op;
const lhs = try func.resolveInst(bin_op.lhs);
const rhs = try func.resolveInst(bin_op.rhs);
- const ty = func.air.typeOf(bin_op.lhs);
+ const lhs_ty = func.typeOf(bin_op.lhs);
+ const rhs_ty = func.typeOf(bin_op.rhs);
+
+ // For certain operations, such as shifting, the types are different.
+ // When converting this to a WebAssembly type, they *must* match to perform
+ // an operation. For this reason we verify if the WebAssembly type is different, in which
+ // case we first coerce the operands to the same type before performing the operation.
+ // For big integers we can ignore this as we will call into compiler-rt which handles this.
+ const result = switch (op) {
+ .shr, .shl => res: {
+ const lhs_wasm_bits = toWasmBits(@intCast(u16, lhs_ty.bitSize(mod))) orelse {
+ return func.fail("TODO: implement '{s}' for types larger than 128 bits", .{@tagName(op)});
+ };
+ const rhs_wasm_bits = toWasmBits(@intCast(u16, rhs_ty.bitSize(mod))).?;
+ const new_rhs = if (lhs_wasm_bits != rhs_wasm_bits and lhs_wasm_bits != 128) blk: {
+ const tmp = try func.intcast(rhs, rhs_ty, lhs_ty);
+ break :blk try tmp.toLocal(func, lhs_ty);
+ } else rhs;
+ const stack_result = try func.binOp(lhs, new_rhs, lhs_ty, op);
+ break :res try stack_result.toLocal(func, lhs_ty);
+ },
+ else => res: {
+ const stack_result = try func.binOp(lhs, rhs, lhs_ty, op);
+ break :res try stack_result.toLocal(func, lhs_ty);
+ },
+ };
- const stack_value = try func.binOp(lhs, rhs, ty, op);
- func.finishAir(inst, try stack_value.toLocal(func, ty), &.{ bin_op.lhs, bin_op.rhs });
+ func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
}
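
The coercion exists because wasm shift instructions require both operands to share one value type, while Zig types the shift amount more narrowly (u6 for a u64 value). A sketch of `toWasmBits` as assumed by the code above, plus the width mismatch it detects:

const std = @import("std");

fn toWasmBitsSketch(bits: u16) ?u16 {
    if (bits <= 32) return 32;
    if (bits <= 64) return 64;
    if (bits <= 128) return 128;
    return null;
}

test "shift operands differ at the wasm level" {
    try std.testing.expectEqual(@as(?u16, 64), toWasmBitsSketch(64)); // lhs: u64
    try std.testing.expectEqual(@as(?u16, 32), toWasmBitsSketch(6)); // rhs: u6
}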
/// Performs a binary operation on the given `WValue`'s
/// NOTE: This leaves the value on top of the stack.
fn binOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue {
+ const mod = func.bin_file.base.options.module.?;
assert(!(lhs != .stack and rhs == .stack));
if (ty.isAnyFloat()) {
@@ -2539,8 +2620,8 @@ fn binOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!
return func.floatOp(float_op, ty, &.{ lhs, rhs });
}
- if (isByRef(ty, func.target)) {
- if (ty.zigTypeTag() == .Int) {
+ if (isByRef(ty, mod)) {
+ if (ty.zigTypeTag(mod) == .Int) {
return func.binOpBigInt(lhs, rhs, ty, op);
} else {
return func.fail(
@@ -2552,8 +2633,8 @@ fn binOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!
const opcode: wasm.Opcode = buildOpcode(.{
.op = op,
- .valtype1 = typeToValtype(ty, func.target),
- .signedness = if (ty.isSignedInt()) .signed else .unsigned,
+ .valtype1 = typeToValtype(ty, mod),
+ .signedness = if (ty.isSignedInt(mod)) .signed else .unsigned,
});
try func.emitWValue(lhs);
try func.emitWValue(rhs);
@@ -2564,38 +2645,58 @@ fn binOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!
}
fn binOpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue {
- if (ty.intInfo(func.target).bits > 128) {
- return func.fail("TODO: Implement binary operation for big integer", .{});
+ const mod = func.bin_file.base.options.module.?;
+ if (ty.intInfo(mod).bits > 128) {
+ return func.fail("TODO: Implement binary operation for big integers larger than 128 bits", .{});
}
- if (op != .add and op != .sub) {
- return func.fail("TODO: Implement binary operation for big integers", .{});
- }
-
- const result = try func.allocStack(ty);
- var lhs_high_bit = try (try func.load(lhs, Type.u64, 0)).toLocal(func, Type.u64);
- defer lhs_high_bit.free(func);
- var rhs_high_bit = try (try func.load(rhs, Type.u64, 0)).toLocal(func, Type.u64);
- defer rhs_high_bit.free(func);
- var high_op_res = try (try func.binOp(lhs_high_bit, rhs_high_bit, Type.u64, op)).toLocal(func, Type.u64);
- defer high_op_res.free(func);
-
- const lhs_low_bit = try func.load(lhs, Type.u64, 8);
- const rhs_low_bit = try func.load(rhs, Type.u64, 8);
- const low_op_res = try func.binOp(lhs_low_bit, rhs_low_bit, Type.u64, op);
+ switch (op) {
+ .mul => return func.callIntrinsic("__multi3", &.{ ty.toIntern(), ty.toIntern() }, ty, &.{ lhs, rhs }),
+ .shr => return func.callIntrinsic("__lshrti3", &.{ ty.toIntern(), .i32_type }, ty, &.{ lhs, rhs }),
+ .shl => return func.callIntrinsic("__ashlti3", &.{ ty.toIntern(), .i32_type }, ty, &.{ lhs, rhs }),
+ .xor => {
+ const result = try func.allocStack(ty);
+ try func.emitWValue(result);
+ const lhs_high_bit = try func.load(lhs, Type.u64, 0);
+ const rhs_high_bit = try func.load(rhs, Type.u64, 0);
+ const xor_high_bit = try func.binOp(lhs_high_bit, rhs_high_bit, Type.u64, .xor);
+ try func.store(.stack, xor_high_bit, Type.u64, result.offset());
- const lt = if (op == .add) blk: {
- break :blk try func.cmp(high_op_res, rhs_high_bit, Type.u64, .lt);
- } else if (op == .sub) blk: {
- break :blk try func.cmp(lhs_high_bit, rhs_high_bit, Type.u64, .lt);
- } else unreachable;
- const tmp = try func.intcast(lt, Type.u32, Type.u64);
- var tmp_op = try (try func.binOp(low_op_res, tmp, Type.u64, op)).toLocal(func, Type.u64);
- defer tmp_op.free(func);
+ try func.emitWValue(result);
+ const lhs_low_bit = try func.load(lhs, Type.u64, 8);
+ const rhs_low_bit = try func.load(rhs, Type.u64, 8);
+ const xor_low_bit = try func.binOp(lhs_low_bit, rhs_low_bit, Type.u64, .xor);
+ try func.store(.stack, xor_low_bit, Type.u64, result.offset() + 8);
+ return result;
+ },
+ .add, .sub => {
+ const result = try func.allocStack(ty);
+ var lhs_high_bit = try (try func.load(lhs, Type.u64, 0)).toLocal(func, Type.u64);
+ defer lhs_high_bit.free(func);
+ var rhs_high_bit = try (try func.load(rhs, Type.u64, 0)).toLocal(func, Type.u64);
+ defer rhs_high_bit.free(func);
+ var high_op_res = try (try func.binOp(lhs_high_bit, rhs_high_bit, Type.u64, op)).toLocal(func, Type.u64);
+ defer high_op_res.free(func);
- try func.store(result, high_op_res, Type.u64, 0);
- try func.store(result, tmp_op, Type.u64, 8);
- return result;
+ const lhs_low_bit = try func.load(lhs, Type.u64, 8);
+ const rhs_low_bit = try func.load(rhs, Type.u64, 8);
+ const low_op_res = try func.binOp(lhs_low_bit, rhs_low_bit, Type.u64, op);
+
+ const lt = if (op == .add) blk: {
+ break :blk try func.cmp(high_op_res, rhs_high_bit, Type.u64, .lt);
+ } else if (op == .sub) blk: {
+ break :blk try func.cmp(lhs_high_bit, rhs_high_bit, Type.u64, .lt);
+ } else unreachable;
+ const tmp = try func.intcast(lt, Type.u32, Type.u64);
+ var tmp_op = try (try func.binOp(low_op_res, tmp, Type.u64, op)).toLocal(func, Type.u64);
+ defer tmp_op.free(func);
+
+ try func.store(result, high_op_res, Type.u64, 0);
+ try func.store(result, tmp_op, Type.u64, 8);
+ return result;
+ },
+ else => return func.fail("TODO: Implement binary operation for big integers: '{s}'", .{@tagName(op)}),
+ }
}
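
The add/sub path is classic double-word arithmetic: operate on each 64-bit half and propagate the carry (or borrow) detected from the wrap-around. A scalar sketch of the add case:

const std = @import("std");

fn add128(lhs_lo: u64, lhs_hi: u64, rhs_lo: u64, rhs_hi: u64) [2]u64 {
    const lo = lhs_lo +% rhs_lo;
    const carry: u64 = @boolToInt(lo < lhs_lo); // wrapped => carry out
    const hi = lhs_hi +% rhs_hi +% carry;
    return .{ lo, hi };
}

test "carry propagates into the high half" {
    const r = add128(std.math.maxInt(u64), 0, 1, 0);
    try std.testing.expectEqual(@as(u64, 0), r[0]);
    try std.testing.expectEqual(@as(u64, 1), r[1]);
}

The mul and shift cases instead call the compiler-rt builtins named above (__multi3, __ashlti3, __lshrti3), which handle full 128-bit operands.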
const FloatOp = enum {
@@ -2676,14 +2777,15 @@ const FloatOp = enum {
fn airUnaryFloatOp(func: *CodeGen, inst: Air.Inst.Index, op: FloatOp) InnerError!void {
const un_op = func.air.instructions.items(.data)[inst].un_op;
const operand = try func.resolveInst(un_op);
- const ty = func.air.typeOf(un_op);
+ const ty = func.typeOf(un_op);
const result = try (try func.floatOp(op, ty, &.{operand})).toLocal(func, ty);
func.finishAir(inst, result, &.{un_op});
}
fn floatOp(func: *CodeGen, float_op: FloatOp, ty: Type, args: []const WValue) InnerError!WValue {
- if (ty.zigTypeTag() == .Vector) {
+ const mod = func.bin_file.base.options.module.?;
+ if (ty.zigTypeTag(mod) == .Vector) {
return func.fail("TODO: Implement floatOps for vectors", .{});
}
@@ -2693,7 +2795,7 @@ fn floatOp(func: *CodeGen, float_op: FloatOp, ty: Type, args: []const WValue) In
for (args) |operand| {
try func.emitWValue(operand);
}
- const opcode = buildOpcode(.{ .op = op, .valtype1 = typeToValtype(ty, func.target) });
+ const opcode = buildOpcode(.{ .op = op, .valtype1 = typeToValtype(ty, mod) });
try func.addTag(Mir.Inst.Tag.fromOpcode(opcode));
return .stack;
}
@@ -2741,24 +2843,49 @@ fn floatOp(func: *CodeGen, float_op: FloatOp, ty: Type, args: []const WValue) In
};
// fma requires three operands
- var param_types_buffer: [3]Type = .{ ty, ty, ty };
+ var param_types_buffer: [3]InternPool.Index = .{ ty.ip_index, ty.ip_index, ty.ip_index };
const param_types = param_types_buffer[0..args.len];
return func.callIntrinsic(fn_name, param_types, ty, args);
}
fn airWrapBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const bin_op = func.air.instructions.items(.data)[inst].bin_op;
const lhs = try func.resolveInst(bin_op.lhs);
const rhs = try func.resolveInst(bin_op.rhs);
- const ty = func.air.typeOf(bin_op.lhs);
+ const lhs_ty = func.typeOf(bin_op.lhs);
+ const rhs_ty = func.typeOf(bin_op.rhs);
- if (ty.zigTypeTag() == .Vector) {
+ if (lhs_ty.zigTypeTag(mod) == .Vector or rhs_ty.zigTypeTag(mod) == .Vector) {
return func.fail("TODO: Implement wrapping arithmetic for vectors", .{});
}
- const result = try (try func.wrapBinOp(lhs, rhs, ty, op)).toLocal(func, ty);
- func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
+ // For certain operations, such as shifting, the operand types may differ.
+ // When converted to WebAssembly types they *must* match to perform
+ // the operation, so when the WebAssembly types differ we first coerce
+ // the operands to the same type before performing the operation.
+ // For big integers we can skip this, as we call into compiler-rt which handles the mismatch.
+ const result = switch (op) {
+ .shr, .shl => res: {
+ const lhs_wasm_bits = toWasmBits(@intCast(u16, lhs_ty.bitSize(mod))) orelse {
+ return func.fail("TODO: implement '{s}' for types larger than 128 bits", .{@tagName(op)});
+ };
+ const rhs_wasm_bits = toWasmBits(@intCast(u16, rhs_ty.bitSize(mod))).?;
+ const new_rhs = if (lhs_wasm_bits != rhs_wasm_bits and lhs_wasm_bits != 128) blk: {
+ const tmp = try func.intcast(rhs, rhs_ty, lhs_ty);
+ break :blk try tmp.toLocal(func, lhs_ty);
+ } else rhs;
+ const stack_result = try func.wrapBinOp(lhs, new_rhs, lhs_ty, op);
+ break :res try stack_result.toLocal(func, lhs_ty);
+ },
+ else => res: {
+ const stack_result = try func.wrapBinOp(lhs, rhs, lhs_ty, op);
+ break :res try stack_result.toLocal(func, lhs_ty);
+ },
+ };
+
+ return func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
}
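// NOTE (editor's example, assumed and not from this patch): a concrete case
// of the shift coercion above:
//     const x: u64 = 0xffff;
//     const s: u6 = 3;
//     _ = x << s;
// `x` lowers to a wasm i64 while the u6 shift amount lowers to an i32, so
// the rhs is widened first (i64.extend_i32_u) so both i64.shl operands match.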
/// Performs a wrapping binary operation.
@@ -2773,8 +2900,9 @@ fn wrapBinOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerEr
/// Asserts `Type` is <= 128 bits.
/// NOTE: When the Type is <= 64 bits, leaves the value on top of the stack.
fn wrapOperand(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue {
- assert(ty.abiSize(func.target) <= 16);
- const bitsize = @intCast(u16, ty.bitSize(func.target));
+ const mod = func.bin_file.base.options.module.?;
+ assert(ty.abiSize(mod) <= 16);
+ const bitsize = @intCast(u16, ty.bitSize(mod));
const wasm_bits = toWasmBits(bitsize) orelse {
return func.fail("TODO: Implement wrapOperand for bitsize '{d}'", .{bitsize});
};
@@ -2810,44 +2938,48 @@ fn wrapOperand(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue {
return WValue{ .stack = {} };
}
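// NOTE (editor's sketch; the body elided by the hunk above is assumed to
// mask the operand down to its bit width): wrapping e.g. a u5 held in a
// wasm i32 would emit
//     i32.const 0x1f
//     i32.and
// leaving the wrapped value on top of the stack, matching the `.stack` result.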
-fn lowerParentPtr(func: *CodeGen, ptr_val: Value, ptr_child_ty: Type) InnerError!WValue {
- switch (ptr_val.tag()) {
- .decl_ref_mut => {
- const decl_index = ptr_val.castTag(.decl_ref_mut).?.data.decl_index;
- return func.lowerParentPtrDecl(ptr_val, decl_index);
+fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue {
+ const mod = func.bin_file.base.options.module.?;
+ const ptr = mod.intern_pool.indexToKey(ptr_val.ip_index).ptr;
+ switch (ptr.addr) {
+ .decl => |decl_index| {
+ return func.lowerParentPtrDecl(ptr_val, decl_index, offset);
},
- .decl_ref => {
- const decl_index = ptr_val.castTag(.decl_ref).?.data;
- return func.lowerParentPtrDecl(ptr_val, decl_index);
+ .mut_decl => |mut_decl| {
+ const decl_index = mut_decl.decl;
+ return func.lowerParentPtrDecl(ptr_val, decl_index, offset);
},
- .variable => {
- const decl_index = ptr_val.castTag(.variable).?.data.owner_decl;
- return func.lowerParentPtrDecl(ptr_val, decl_index);
+ .eu_payload => |tag| return func.fail("TODO: Implement lowerParentPtr for {}", .{tag}),
+ .int => |base| return func.lowerConstant(base.toValue(), Type.usize),
+ .opt_payload => |base_ptr| return func.lowerParentPtr(base_ptr.toValue(), offset),
+ .comptime_field => unreachable,
+ .elem => |elem| {
+ const index = elem.index;
+ const elem_type = mod.intern_pool.typeOf(elem.base).toType().elemType2(mod);
+ const elem_offset = index * elem_type.abiSize(mod);
+ return func.lowerParentPtr(elem.base.toValue(), @intCast(u32, elem_offset + offset));
},
- .field_ptr => {
- const field_ptr = ptr_val.castTag(.field_ptr).?.data;
- const parent_ty = field_ptr.container_ty;
- const parent_ptr = try func.lowerParentPtr(field_ptr.container_ptr, parent_ty);
-
- const offset = switch (parent_ty.zigTypeTag()) {
- .Struct => switch (parent_ty.containerLayout()) {
- .Packed => parent_ty.packedStructFieldByteOffset(field_ptr.field_index, func.target),
- else => parent_ty.structFieldOffset(field_ptr.field_index, func.target),
+ .field => |field| {
+ const parent_ty = mod.intern_pool.typeOf(field.base).toType().childType(mod);
+
+ const field_offset = switch (parent_ty.zigTypeTag(mod)) {
+ .Struct => switch (parent_ty.containerLayout(mod)) {
+ .Packed => parent_ty.packedStructFieldByteOffset(@intCast(usize, field.index), mod),
+ else => parent_ty.structFieldOffset(@intCast(usize, field.index), mod),
},
- .Union => switch (parent_ty.containerLayout()) {
+ .Union => switch (parent_ty.containerLayout(mod)) {
.Packed => 0,
else => blk: {
- const layout: Module.Union.Layout = parent_ty.unionGetLayout(func.target);
+ const layout: Module.Union.Layout = parent_ty.unionGetLayout(mod);
if (layout.payload_size == 0) break :blk 0;
if (layout.payload_align > layout.tag_align) break :blk 0;
// tag is stored first, so calculate the offset from where the payload starts
- const offset = @intCast(u32, std.mem.alignForwardGeneric(u64, layout.tag_size, layout.tag_align));
- break :blk offset;
+ break :blk @intCast(u32, std.mem.alignForward(u64, layout.tag_size, layout.tag_align));
},
},
- .Pointer => switch (parent_ty.ptrSize()) {
- .Slice => switch (field_ptr.field_index) {
+ .Pointer => switch (parent_ty.ptrSize(mod)) {
+ .Slice => switch (field.index) {
0 => 0,
1 => func.ptrSize(),
else => unreachable,
@@ -2856,74 +2988,52 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, ptr_child_ty: Type) InnerError
},
else => unreachable,
};
-
- return switch (parent_ptr) {
- .memory => |ptr| WValue{
- .memory_offset = .{
- .pointer = ptr,
- .offset = @intCast(u32, offset),
- },
- },
- .memory_offset => |mem_off| WValue{
- .memory_offset = .{
- .pointer = mem_off.pointer,
- .offset = @intCast(u32, offset) + mem_off.offset,
- },
- },
- else => unreachable,
- };
- },
- .elem_ptr => {
- const elem_ptr = ptr_val.castTag(.elem_ptr).?.data;
- const index = elem_ptr.index;
- const offset = index * ptr_child_ty.abiSize(func.target);
- const array_ptr = try func.lowerParentPtr(elem_ptr.array_ptr, elem_ptr.elem_ty);
-
- return WValue{ .memory_offset = .{
- .pointer = array_ptr.memory,
- .offset = @intCast(u32, offset),
- } };
+ return func.lowerParentPtr(field.base.toValue(), @intCast(u32, offset + field_offset));
},
- .opt_payload_ptr => {
- const payload_ptr = ptr_val.castTag(.opt_payload_ptr).?.data;
- return func.lowerParentPtr(payload_ptr.container_ptr, payload_ptr.container_ty);
- },
- else => |tag| return func.fail("TODO: Implement lowerParentPtr for tag: {}", .{tag}),
}
}
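// NOTE (editor's example with a hypothetical decl `s`): for a pointer like
// `&s.arr[2]`, the .field case above adds structFieldOffset for `arr`, the
// .elem case adds 2 * abiSize(element type), and the recursion bottoms out
// in lowerParentPtrDecl with the accumulated byte offset.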
-fn lowerParentPtrDecl(func: *CodeGen, ptr_val: Value, decl_index: Module.Decl.Index) InnerError!WValue {
- const module = func.bin_file.base.options.module.?;
- const decl = module.declPtr(decl_index);
- module.markDeclAlive(decl);
- var ptr_ty_payload: Type.Payload.ElemType = .{
- .base = .{ .tag = .single_mut_pointer },
- .data = decl.ty,
- };
- const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
- return func.lowerDeclRefValue(.{ .ty = ptr_ty, .val = ptr_val }, decl_index);
+fn lowerParentPtrDecl(func: *CodeGen, ptr_val: Value, decl_index: Module.Decl.Index, offset: u32) InnerError!WValue {
+ const mod = func.bin_file.base.options.module.?;
+ const decl = mod.declPtr(decl_index);
+ try mod.markDeclAlive(decl);
+ const ptr_ty = try mod.singleMutPtrType(decl.ty);
+ return func.lowerDeclRefValue(.{ .ty = ptr_ty, .val = ptr_val }, decl_index, offset);
}
-fn lowerDeclRefValue(func: *CodeGen, tv: TypedValue, decl_index: Module.Decl.Index) InnerError!WValue {
- if (tv.ty.isSlice()) {
+fn lowerDeclRefValue(func: *CodeGen, tv: TypedValue, decl_index: Module.Decl.Index, offset: u32) InnerError!WValue {
+ const mod = func.bin_file.base.options.module.?;
+ if (tv.ty.isSlice(mod)) {
return WValue{ .memory = try func.bin_file.lowerUnnamedConst(tv, decl_index) };
}
- const module = func.bin_file.base.options.module.?;
- const decl = module.declPtr(decl_index);
- if (decl.ty.zigTypeTag() != .Fn and !decl.ty.hasRuntimeBitsIgnoreComptime()) {
+ const decl = mod.declPtr(decl_index);
+ // check if decl is an alias to a function, in which case we
+ // want to lower the actual decl, rather than the alias itself.
+ if (decl.val.getFunction(mod)) |func_val| {
+ if (func_val.owner_decl != decl_index) {
+ return func.lowerDeclRefValue(tv, func_val.owner_decl, offset);
+ }
+ } else if (decl.val.getExternFunc(mod)) |func_val| {
+ if (func_val.decl != decl_index) {
+ return func.lowerDeclRefValue(tv, func_val.decl, offset);
+ }
+ }
+ if (decl.ty.zigTypeTag(mod) != .Fn and !decl.ty.hasRuntimeBitsIgnoreComptime(mod)) {
return WValue{ .imm32 = 0xaaaaaaaa };
}
- module.markDeclAlive(decl);
+ try mod.markDeclAlive(decl);
const atom_index = try func.bin_file.getOrCreateAtomForDecl(decl_index);
const atom = func.bin_file.getAtom(atom_index);
const target_sym_index = atom.sym_index;
- if (decl.ty.zigTypeTag() == .Fn) {
+ if (decl.ty.zigTypeTag(mod) == .Fn) {
try func.bin_file.addTableFunction(target_sym_index);
return WValue{ .function_index = target_sym_index };
- } else return WValue{ .memory = target_sym_index };
+ } else if (offset == 0) {
+ return WValue{ .memory = target_sym_index };
+ } else return WValue{ .memory_offset = .{ .pointer = target_sym_index, .offset = offset } };
}
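// NOTE (editor's note): a zero offset keeps the plain `.memory` relocation
// so existing call sites are unchanged, while a non-zero offset accumulated
// through lowerParentPtr becomes a `.memory_offset` against the same symbol.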
/// Converts a signed integer to its 2's complement form and returns
@@ -2943,131 +3053,201 @@ fn toTwosComplement(value: anytype, bits: u7) std.meta.Int(.unsigned, @typeInfo(
}
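// NOTE (editor's worked examples, assuming the helper masks the value down
// to `bits`):
//     toTwosComplement(@as(i8, -2), 8) == 0xfe
//     toTwosComplement(@as(i32, -1), 32) == 0xffff_ffff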
fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue {
+ const mod = func.bin_file.base.options.module.?;
var val = arg_val;
- if (val.castTag(.runtime_value)) |rt| {
- val = rt.data;
- }
- if (val.isUndefDeep()) return func.emitUndefined(ty);
- if (val.castTag(.decl_ref)) |decl_ref| {
- const decl_index = decl_ref.data;
- return func.lowerDeclRefValue(.{ .ty = ty, .val = val }, decl_index);
- }
- if (val.castTag(.decl_ref_mut)) |decl_ref_mut| {
- const decl_index = decl_ref_mut.data.decl_index;
- return func.lowerDeclRefValue(.{ .ty = ty, .val = val }, decl_index);
- }
- const target = func.target;
- switch (ty.zigTypeTag()) {
- .Void => return WValue{ .none = {} },
- .Int => {
- const int_info = ty.intInfo(func.target);
+ switch (mod.intern_pool.indexToKey(val.ip_index)) {
+ .runtime_value => |rt| val = rt.val.toValue(),
+ else => {},
+ }
+ if (val.isUndefDeep(mod)) return func.emitUndefined(ty);
+
+ if (val.ip_index == .none) switch (ty.zigTypeTag(mod)) {
+ .Array => |zig_type| return func.fail("Wasm TODO: LowerConstant for zigTypeTag {}", .{zig_type}),
+ .Struct => {
+ const struct_obj = mod.typeToStruct(ty).?;
+ assert(struct_obj.layout == .Packed);
+ var buf: [8]u8 = .{0} ** 8; // zero the buffer so we do not read 0xaa as an integer
+ val.writeToPackedMemory(ty, func.bin_file.base.options.module.?, &buf, 0) catch unreachable;
+ const int_val = try mod.intValue(
+ struct_obj.backing_int_ty,
+ std.mem.readIntLittle(u64, &buf),
+ );
+ return func.lowerConstant(int_val, struct_obj.backing_int_ty);
+ },
+ .Vector => {
+ assert(determineSimdStoreStrategy(ty, mod) == .direct);
+ var buf: [16]u8 = undefined;
+ val.writeToMemory(ty, mod, &buf) catch unreachable;
+ return func.storeSimdImmd(buf);
+ },
+ .Frame,
+ .AnyFrame,
+ => return func.fail("Wasm TODO: LowerConstant for type {}", .{ty.fmt(mod)}),
+ .Float,
+ .Union,
+ .Optional,
+ .ErrorUnion,
+ .ErrorSet,
+ .Int,
+ .Enum,
+ .Bool,
+ .Pointer,
+ => unreachable, // handled below
+ .Type,
+ .Void,
+ .NoReturn,
+ .ComptimeFloat,
+ .ComptimeInt,
+ .Undefined,
+ .Null,
+ .Opaque,
+ .EnumLiteral,
+ .Fn,
+ => unreachable, // comptime-only types
+ };
+
+ switch (mod.intern_pool.indexToKey(val.ip_index)) {
+ .int_type,
+ .ptr_type,
+ .array_type,
+ .vector_type,
+ .opt_type,
+ .anyframe_type,
+ .error_union_type,
+ .simple_type,
+ .struct_type,
+ .anon_struct_type,
+ .union_type,
+ .opaque_type,
+ .enum_type,
+ .func_type,
+ .error_set_type,
+ .inferred_error_set_type,
+ => unreachable, // types, not values
+
+ .undef, .runtime_value => unreachable, // handled above
+ .simple_value => |simple_value| switch (simple_value) {
+ .undefined,
+ .void,
+ .null,
+ .empty_struct,
+ .@"unreachable",
+ .generic_poison,
+ => unreachable, // non-runtime values
+ .false, .true => return WValue{ .imm32 = switch (simple_value) {
+ .false => 0,
+ .true => 1,
+ else => unreachable,
+ } },
+ },
+ .variable,
+ .extern_func,
+ .func,
+ .enum_literal,
+ .empty_enum_value,
+ => unreachable, // non-runtime values
+ .int => {
+ const int_info = ty.intInfo(mod);
switch (int_info.signedness) {
.signed => switch (int_info.bits) {
0...32 => return WValue{ .imm32 = @intCast(u32, toTwosComplement(
- val.toSignedInt(target),
+ val.toSignedInt(mod),
@intCast(u6, int_info.bits),
)) },
33...64 => return WValue{ .imm64 = toTwosComplement(
- val.toSignedInt(target),
+ val.toSignedInt(mod),
@intCast(u7, int_info.bits),
) },
else => unreachable,
},
.unsigned => switch (int_info.bits) {
- 0...32 => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(target)) },
- 33...64 => return WValue{ .imm64 = val.toUnsignedInt(target) },
+ 0...32 => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(mod)) },
+ 33...64 => return WValue{ .imm64 = val.toUnsignedInt(mod) },
else => unreachable,
},
}
},
- .Bool => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(target)) },
- .Float => switch (ty.floatBits(func.target)) {
- 16 => return WValue{ .imm32 = @bitCast(u16, val.toFloat(f16)) },
- 32 => return WValue{ .float32 = val.toFloat(f32) },
- 64 => return WValue{ .float64 = val.toFloat(f64) },
- else => unreachable,
- },
- .Pointer => switch (val.tag()) {
- .field_ptr, .elem_ptr, .opt_payload_ptr => {
- return func.lowerParentPtr(val, ty.childType());
- },
- .int_u64, .one => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(target)) },
- .zero, .null_value => return WValue{ .imm32 = 0 },
- else => return func.fail("Wasm TODO: lowerConstant for other const pointer tag {}", .{val.tag()}),
+ .err => |err| {
+ const int = try mod.getErrorValue(err.name);
+ return WValue{ .imm32 = int };
},
- .Enum => {
- if (val.castTag(.enum_field_index)) |field_index| {
- switch (ty.tag()) {
- .enum_simple => return WValue{ .imm32 = field_index.data },
- .enum_full, .enum_nonexhaustive => {
- const enum_full = ty.cast(Type.Payload.EnumFull).?.data;
- if (enum_full.values.count() != 0) {
- const tag_val = enum_full.values.keys()[field_index.data];
- return func.lowerConstant(tag_val, enum_full.tag_ty);
- } else {
- return WValue{ .imm32 = field_index.data };
- }
- },
- .enum_numbered => {
- const index = field_index.data;
- const enum_data = ty.castTag(.enum_numbered).?.data;
- const enum_val = enum_data.values.keys()[index];
- return func.lowerConstant(enum_val, enum_data.tag_ty);
- },
- else => return func.fail("TODO: lowerConstant for enum tag: {}", .{ty.tag()}),
- }
- } else {
- var int_tag_buffer: Type.Payload.Bits = undefined;
- const int_tag_ty = ty.intTagType(&int_tag_buffer);
- return func.lowerConstant(val, int_tag_ty);
+ .error_union => |error_union| {
+ const err_tv: TypedValue = switch (error_union.val) {
+ .err_name => |err_name| .{
+ .ty = ty.errorUnionSet(mod),
+ .val = (try mod.intern(.{ .err = .{
+ .ty = ty.errorUnionSet(mod).toIntern(),
+ .name = err_name,
+ } })).toValue(),
+ },
+ .payload => .{
+ .ty = Type.err_int,
+ .val = try mod.intValue(Type.err_int, 0),
+ },
+ };
+ const payload_type = ty.errorUnionPayload(mod);
+ if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) {
// The payload has no runtime bits, so we lower the value as the error set type directly.
+ return func.lowerConstant(err_tv.val, err_tv.ty);
}
+
+ return func.fail("Wasm TODO: lowerConstant error union with non-zero-bit payload type", .{});
},
- .ErrorSet => switch (val.tag()) {
- .@"error" => {
- const kv = try func.bin_file.base.options.module.?.getErrorValue(val.getError().?);
- return WValue{ .imm32 = kv.value };
- },
- else => return WValue{ .imm32 = 0 },
+ .enum_tag => |enum_tag| {
+ const int_tag_ty = mod.intern_pool.typeOf(enum_tag.int);
+ return func.lowerConstant(enum_tag.int.toValue(), int_tag_ty.toType());
},
- .ErrorUnion => {
- const error_type = ty.errorUnionSet();
- const is_pl = val.errorUnionIsPayload();
- const err_val = if (!is_pl) val else Value.initTag(.zero);
- return func.lowerConstant(err_val, error_type);
+ .float => |float| switch (float.storage) {
+ .f16 => |f16_val| return WValue{ .imm32 = @bitCast(u16, f16_val) },
+ .f32 => |f32_val| return WValue{ .float32 = f32_val },
+ .f64 => |f64_val| return WValue{ .float64 = f64_val },
+ else => unreachable,
},
- .Optional => if (ty.optionalReprIsPayload()) {
- var buf: Type.Payload.ElemType = undefined;
- const pl_ty = ty.optionalChild(&buf);
- if (val.castTag(.opt_payload)) |payload| {
- return func.lowerConstant(payload.data, pl_ty);
- } else if (val.isNull()) {
- return WValue{ .imm32 = 0 };
+ .ptr => |ptr| switch (ptr.addr) {
+ .decl => |decl| return func.lowerDeclRefValue(.{ .ty = ty, .val = val }, decl, 0),
+ .mut_decl => |mut_decl| return func.lowerDeclRefValue(.{ .ty = ty, .val = val }, mut_decl.decl, 0),
+ .int => |int| return func.lowerConstant(int.toValue(), mod.intern_pool.typeOf(int).toType()),
+ .opt_payload, .elem, .field => return func.lowerParentPtr(val, 0),
+ else => return func.fail("Wasm TODO: lowerConstant for other const addr tag {}", .{ptr.addr}),
+ },
+ .opt => if (ty.optionalReprIsPayload(mod)) {
+ const pl_ty = ty.optionalChild(mod);
+ if (val.optionalValue(mod)) |payload| {
+ return func.lowerConstant(payload, pl_ty);
} else {
- return func.lowerConstant(val, pl_ty);
+ return WValue{ .imm32 = 0 };
}
} else {
- const is_pl = val.tag() == .opt_payload;
- return WValue{ .imm32 = @boolToInt(is_pl) };
+ return WValue{ .imm32 = @boolToInt(!val.isNull(mod)) };
},
- .Struct => {
- const struct_obj = ty.castTag(.@"struct").?.data;
- assert(struct_obj.layout == .Packed);
- var buf: [8]u8 = .{0} ** 8; // zero the buffer so we do not read 0xaa as integer
- val.writeToPackedMemory(ty, func.bin_file.base.options.module.?, &buf, 0) catch unreachable;
- var payload: Value.Payload.U64 = .{
- .base = .{ .tag = .int_u64 },
- .data = std.mem.readIntLittle(u64, &buf),
- };
- const int_val = Value.initPayload(&payload.base);
- return func.lowerConstant(int_val, struct_obj.backing_int_ty);
+ .aggregate => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ .array_type => return func.fail("Wasm TODO: LowerConstant for {}", .{ty.fmt(mod)}),
+ .vector_type => {
+ assert(determineSimdStoreStrategy(ty, mod) == .direct);
+ var buf: [16]u8 = undefined;
+ val.writeToMemory(ty, mod, &buf) catch unreachable;
+ return func.storeSimdImmd(buf);
+ },
+ .struct_type, .anon_struct_type => {
+ const struct_obj = mod.typeToStruct(ty).?;
+ assert(struct_obj.layout == .Packed);
+ var buf: [8]u8 = .{0} ** 8; // zero the buffer so we do not read 0xaa as an integer
+ val.writeToPackedMemory(ty, func.bin_file.base.options.module.?, &buf, 0) catch unreachable;
+ const int_val = try mod.intValue(
+ struct_obj.backing_int_ty,
+ std.mem.readIntLittle(u64, &buf),
+ );
+ return func.lowerConstant(int_val, struct_obj.backing_int_ty);
+ },
+ else => unreachable,
},
- .Vector => {
- assert(determineSimdStoreStrategy(ty, target) == .direct);
- var buf: [16]u8 = undefined;
- val.writeToMemory(ty, func.bin_file.base.options.module.?, &buf) catch unreachable;
- return func.storeSimdImmd(buf);
+ .un => |union_obj| {
+ // In this case we have a packed union, which will not be passed by reference.
+ const field_index = ty.unionTagFieldIndex(union_obj.tag.toValue(), func.bin_file.base.options.module.?).?;
+ const field_ty = ty.unionFields(mod).values()[field_index].ty;
+ return func.lowerConstant(union_obj.val.toValue(), field_ty);
},
- else => |zig_type| return func.fail("Wasm TODO: LowerConstant for zigTypeTag {}", .{zig_type}),
+ .memoized_call => unreachable,
}
}
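// NOTE (editor's worked example for the .int branch above): @as(i8, -2)
// reports .signed with 8 bits, so it lowers to
//     WValue{ .imm32 = @intCast(u32, toTwosComplement(-2, 8)) } // == 0xfe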
@@ -3080,9 +3260,10 @@ fn storeSimdImmd(func: *CodeGen, value: [16]u8) !WValue {
}
fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue {
- switch (ty.zigTypeTag()) {
+ const mod = func.bin_file.base.options.module.?;
+ switch (ty.zigTypeTag(mod)) {
.Bool, .ErrorSet => return WValue{ .imm32 = 0xaaaaaaaa },
- .Int => switch (ty.intInfo(func.target).bits) {
+ .Int, .Enum => switch (ty.intInfo(mod).bits) {
0...32 => return WValue{ .imm32 = 0xaaaaaaaa },
33...64 => return WValue{ .imm64 = 0xaaaaaaaaaaaaaaaa },
else => unreachable,
@@ -3099,9 +3280,8 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue {
else => unreachable,
},
.Optional => {
- var buf: Type.Payload.ElemType = undefined;
- const pl_ty = ty.optionalChild(&buf);
- if (ty.optionalReprIsPayload()) {
+ const pl_ty = ty.optionalChild(mod);
+ if (ty.optionalReprIsPayload(mod)) {
return func.emitUndefined(pl_ty);
}
return WValue{ .imm32 = 0xaaaaaaaa };
@@ -3110,11 +3290,11 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue {
return WValue{ .imm32 = 0xaaaaaaaa };
},
.Struct => {
- const struct_obj = ty.castTag(.@"struct").?.data;
+ const struct_obj = mod.typeToStruct(ty).?;
assert(struct_obj.layout == .Packed);
return func.emitUndefined(struct_obj.backing_int_ty);
},
- else => return func.fail("Wasm TODO: emitUndefined for type: {}\n", .{ty.zigTypeTag()}),
+ else => return func.fail("Wasm TODO: emitUndefined for type: {}\n", .{ty.zigTypeTag(mod)}),
}
}
@@ -3122,56 +3302,52 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue {
/// It's illegal to provide a value with a type that cannot be represented
/// as an integer value.
fn valueAsI32(func: *const CodeGen, val: Value, ty: Type) i32 {
- const target = func.target;
- switch (ty.zigTypeTag()) {
- .Enum => {
- if (val.castTag(.enum_field_index)) |field_index| {
- switch (ty.tag()) {
- .enum_simple => return @bitCast(i32, field_index.data),
- .enum_full, .enum_nonexhaustive => {
- const enum_full = ty.cast(Type.Payload.EnumFull).?.data;
- if (enum_full.values.count() != 0) {
- const tag_val = enum_full.values.keys()[field_index.data];
- return func.valueAsI32(tag_val, enum_full.tag_ty);
- } else return @bitCast(i32, field_index.data);
- },
- .enum_numbered => {
- const index = field_index.data;
- const enum_data = ty.castTag(.enum_numbered).?.data;
- return func.valueAsI32(enum_data.values.keys()[index], enum_data.tag_ty);
- },
- else => unreachable,
- }
- } else {
- var int_tag_buffer: Type.Payload.Bits = undefined;
- const int_tag_ty = ty.intTagType(&int_tag_buffer);
- return func.valueAsI32(val, int_tag_ty);
- }
- },
- .Int => switch (ty.intInfo(func.target).signedness) {
- .signed => return @truncate(i32, val.toSignedInt(target)),
- .unsigned => return @bitCast(i32, @truncate(u32, val.toUnsignedInt(target))),
- },
- .ErrorSet => {
- const kv = func.bin_file.base.options.module.?.getErrorValue(val.getError().?) catch unreachable; // passed invalid `Value` to function
- return @bitCast(i32, kv.value);
+ const mod = func.bin_file.base.options.module.?;
+
+ switch (val.ip_index) {
+ .none => {},
+ .bool_true => return 1,
+ .bool_false => return 0,
+ else => return switch (mod.intern_pool.indexToKey(val.ip_index)) {
+ .enum_tag => |enum_tag| intIndexAsI32(&mod.intern_pool, enum_tag.int, mod),
+ .int => |int| intStorageAsI32(int.storage, mod),
+ .ptr => |ptr| intIndexAsI32(&mod.intern_pool, ptr.addr.int, mod),
+ .err => |err| @bitCast(i32, @intCast(Module.ErrorInt, mod.global_error_set.getIndex(err.name).?)),
+ else => unreachable,
},
- .Bool => return @intCast(i32, val.toSignedInt(target)),
- .Pointer => return @intCast(i32, val.toSignedInt(target)),
- else => unreachable, // Programmer called this function for an illegal type
}
+
+ return switch (ty.zigTypeTag(mod)) {
+ .ErrorSet => @bitCast(i32, val.getErrorInt(mod)),
+ else => unreachable, // Programmer called this function for an illegal type
+ };
+}
+
+fn intIndexAsI32(ip: *const InternPool, int: InternPool.Index, mod: *Module) i32 {
+ return intStorageAsI32(ip.indexToKey(int).int.storage, mod);
+}
+
+fn intStorageAsI32(storage: InternPool.Key.Int.Storage, mod: *Module) i32 {
+ return switch (storage) {
+ .i64 => |x| @intCast(i32, x),
+ .u64 => |x| @bitCast(i32, @intCast(u32, x)),
+ .big_int => unreachable,
+ .lazy_align => |ty| @bitCast(i32, ty.toType().abiAlignment(mod)),
+ .lazy_size => |ty| @bitCast(i32, @intCast(u32, ty.toType().abiSize(mod))),
+ };
}
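// NOTE (editor's note): .lazy_align/.lazy_size are integers whose value is
// a type's ABI alignment or size, resolved only here at lowering time; a
// lazy size of `u64`, for example, materializes as 8.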
fn airBlock(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
const block_ty = func.air.getRefType(ty_pl.ty);
- const wasm_block_ty = genBlockType(block_ty, func.target);
+ const wasm_block_ty = genBlockType(block_ty, mod);
const extra = func.air.extraData(Air.Block, ty_pl.payload);
const body = func.air.extra[extra.end..][0..extra.data.body_len];
// if wasm_block_ty is non-empty, we create a local to store the temporary value
const block_result: WValue = if (wasm_block_ty != wasm.block_empty) blk: {
- const ty: Type = if (isByRef(block_ty, func.target)) Type.u32 else block_ty;
+ const ty: Type = if (isByRef(block_ty, mod)) Type.u32 else block_ty;
break :blk try func.ensureAllocLocal(ty); // make sure it's a clean local as it may never get overwritten
} else WValue.none;
@@ -3182,9 +3358,13 @@ fn airBlock(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
.label = func.block_depth,
.value = block_result,
});
+
try func.genBody(body);
try func.endBlock();
+ const liveness = func.liveness.getBlock(inst);
+ try func.currentBranch().values.ensureUnusedCapacity(func.gpa, liveness.deaths.len);
+
func.finishAir(inst, block_result, &.{});
}
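// NOTE (editor's note, an assumption based on airBr below): a non-empty
// block result is modeled as a scratch local rather than a wasm block
// result; each `br` lowers its operand and stores it into `block_result`
// before branching to this block's label.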
@@ -3239,47 +3419,37 @@ fn airCondBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.addLabel(.br_if, 0);
try func.branches.ensureUnusedCapacity(func.gpa, 2);
-
- func.branches.appendAssumeCapacity(.{});
- try func.currentBranch().values.ensureUnusedCapacity(func.gpa, @intCast(u32, liveness_condbr.else_deaths.len));
- try func.genBody(else_body);
- try func.endBlock();
- var else_stack = func.branches.pop();
- defer else_stack.deinit(func.gpa);
+ {
+ func.branches.appendAssumeCapacity(.{});
+ try func.currentBranch().values.ensureUnusedCapacity(func.gpa, @intCast(u32, liveness_condbr.else_deaths.len));
+ defer {
+ var else_stack = func.branches.pop();
+ else_stack.deinit(func.gpa);
+ }
+ try func.genBody(else_body);
+ try func.endBlock();
+ }
// Outer block that matches the condition
- func.branches.appendAssumeCapacity(.{});
- try func.currentBranch().values.ensureUnusedCapacity(func.gpa, @intCast(u32, liveness_condbr.then_deaths.len));
- try func.genBody(then_body);
- var then_stack = func.branches.pop();
- defer then_stack.deinit(func.gpa);
-
- try func.mergeBranch(&else_stack);
- try func.mergeBranch(&then_stack);
+ {
+ func.branches.appendAssumeCapacity(.{});
+ try func.currentBranch().values.ensureUnusedCapacity(func.gpa, @intCast(u32, liveness_condbr.then_deaths.len));
+ defer {
+ var then_stack = func.branches.pop();
+ then_stack.deinit(func.gpa);
+ }
+ try func.genBody(then_body);
+ }
func.finishAir(inst, .none, &.{});
}
-fn mergeBranch(func: *CodeGen, branch: *const Branch) !void {
- const parent = func.currentBranch();
-
- const target_slice = branch.values.entries.slice();
- const target_keys = target_slice.items(.key);
- const target_values = target_slice.items(.value);
-
- try parent.values.ensureTotalCapacity(func.gpa, parent.values.capacity() + branch.values.count());
- for (target_keys, 0..) |key, index| {
- // TODO: process deaths from branches
- parent.values.putAssumeCapacity(key, target_values[index]);
- }
-}
-
fn airCmp(func: *CodeGen, inst: Air.Inst.Index, op: std.math.CompareOperator) InnerError!void {
const bin_op = func.air.instructions.items(.data)[inst].bin_op;
const lhs = try func.resolveInst(bin_op.lhs);
const rhs = try func.resolveInst(bin_op.rhs);
- const operand_ty = func.air.typeOf(bin_op.lhs);
+ const operand_ty = func.typeOf(bin_op.lhs);
const result = try (try func.cmp(lhs, rhs, operand_ty, op)).toLocal(func, Type.u32); // comparison result is always 32 bits
func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
}
@@ -3289,16 +3459,16 @@ fn airCmp(func: *CodeGen, inst: Air.Inst.Index, op: std.math.CompareOperator) In
/// NOTE: This leaves the result on top of the stack, rather than a new local.
fn cmp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareOperator) InnerError!WValue {
assert(!(lhs != .stack and rhs == .stack));
- if (ty.zigTypeTag() == .Optional and !ty.optionalReprIsPayload()) {
- var buf: Type.Payload.ElemType = undefined;
- const payload_ty = ty.optionalChild(&buf);
- if (payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ const mod = func.bin_file.base.options.module.?;
+ if (ty.zigTypeTag(mod) == .Optional and !ty.optionalReprIsPayload(mod)) {
+ const payload_ty = ty.optionalChild(mod);
+ if (payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
// When we hit this case, we must check the value of optionals
// that are not pointers. This means first checking that both lhs and rhs
// are non-null, as well as checking that the payloads of lhs and rhs match
return func.cmpOptionals(lhs, rhs, ty, op);
}
- } else if (isByRef(ty, func.target)) {
+ } else if (isByRef(ty, mod)) {
return func.cmpBigInt(lhs, rhs, ty, op);
} else if (ty.isAnyFloat() and ty.floatBits(func.target) == 16) {
return func.cmpFloat16(lhs, rhs, op);
@@ -3311,13 +3481,13 @@ fn cmp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareO
const signedness: std.builtin.Signedness = blk: {
// by default we assume the operand type is unsigned (i.e. bools and enum values)
- if (ty.zigTypeTag() != .Int) break :blk .unsigned;
+ if (ty.zigTypeTag(mod) != .Int) break :blk .unsigned;
// in case of an actual integer, we emit the correct signedness
- break :blk ty.intInfo(func.target).signedness;
+ break :blk ty.intInfo(mod).signedness;
};
const opcode: wasm.Opcode = buildOpcode(.{
- .valtype1 = typeToValtype(ty, func.target),
+ .valtype1 = typeToValtype(ty, mod),
.op = switch (op) {
.lt => .lt,
.lte => .le,
@@ -3374,11 +3544,12 @@ fn airCmpLtErrorsLen(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const br = func.air.instructions.items(.data)[inst].br;
const block = func.blocks.get(br.block_inst).?;
// if operand has codegen bits we should break with a value
- if (func.air.typeOf(br.operand).hasRuntimeBitsIgnoreComptime()) {
+ if (func.typeOf(br.operand).hasRuntimeBitsIgnoreComptime(mod)) {
const operand = try func.resolveInst(br.operand);
try func.lowerToStack(operand);
@@ -3399,17 +3570,18 @@ fn airNot(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
const operand = try func.resolveInst(ty_op.operand);
- const operand_ty = func.air.typeOf(ty_op.operand);
+ const operand_ty = func.typeOf(ty_op.operand);
+ const mod = func.bin_file.base.options.module.?;
const result = result: {
- if (operand_ty.zigTypeTag() == .Bool) {
+ if (operand_ty.zigTypeTag(mod) == .Bool) {
try func.emitWValue(operand);
try func.addTag(.i32_eqz);
const not_tmp = try func.allocLocal(operand_ty);
try func.addLabel(.local_set, not_tmp.local.value);
break :result not_tmp;
} else {
- const operand_bits = operand_ty.intInfo(func.target).bits;
+ const operand_bits = operand_ty.intInfo(mod).bits;
const wasm_bits = toWasmBits(operand_bits) orelse {
return func.fail("TODO: Implement binary NOT for integer with bitsize '{d}'", .{operand_bits});
};
@@ -3464,8 +3636,8 @@ fn airBitcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
const result = result: {
const operand = try func.resolveInst(ty_op.operand);
- const wanted_ty = func.air.typeOfIndex(inst);
- const given_ty = func.air.typeOf(ty_op.operand);
+ const wanted_ty = func.typeOfIndex(inst);
+ const given_ty = func.typeOf(ty_op.operand);
if (given_ty.isAnyFloat() or wanted_ty.isAnyFloat()) {
const bitcast_result = try func.bitcast(wanted_ty, given_ty, operand);
break :result try bitcast_result.toLocal(func, wanted_ty);
@@ -3476,16 +3648,17 @@ fn airBitcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn bitcast(func: *CodeGen, wanted_ty: Type, given_ty: Type, operand: WValue) InnerError!WValue {
+ const mod = func.bin_file.base.options.module.?;
// if we bitcast a float to or from an integer we must use the 'reinterpret' instruction
if (!(wanted_ty.isAnyFloat() or given_ty.isAnyFloat())) return operand;
- if (wanted_ty.tag() == .f16 or given_ty.tag() == .f16) return operand;
- if (wanted_ty.bitSize(func.target) > 64) return operand;
- assert((wanted_ty.isInt() and given_ty.isAnyFloat()) or (wanted_ty.isAnyFloat() and given_ty.isInt()));
+ if (wanted_ty.ip_index == .f16_type or given_ty.ip_index == .f16_type) return operand;
+ if (wanted_ty.bitSize(mod) > 64) return operand;
+ assert((wanted_ty.isInt(mod) and given_ty.isAnyFloat()) or (wanted_ty.isAnyFloat() and given_ty.isInt(mod)));
const opcode = buildOpcode(.{
.op = .reinterpret,
- .valtype1 = typeToValtype(wanted_ty, func.target),
- .valtype2 = typeToValtype(given_ty, func.target),
+ .valtype1 = typeToValtype(wanted_ty, mod),
+ .valtype2 = typeToValtype(given_ty, mod),
});
try func.emitWValue(operand);
try func.addTag(Mir.Inst.Tag.fromOpcode(opcode));
@@ -3493,19 +3666,21 @@ fn bitcast(func: *CodeGen, wanted_ty: Type, given_ty: Type, operand: WValue) Inn
}
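// NOTE (editor's example): bitcasting between u32 and f32 emits
// i32.reinterpret_f32 / f32.reinterpret_i32; f16 values are already carried
// as an i32 so they pass through untouched, as does anything wider than 64
// bits, which is handled by reference.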
fn airStructFieldPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
const extra = func.air.extraData(Air.StructField, ty_pl.payload);
const struct_ptr = try func.resolveInst(extra.data.struct_operand);
- const struct_ty = func.air.typeOf(extra.data.struct_operand).childType();
+ const struct_ty = func.typeOf(extra.data.struct_operand).childType(mod);
const result = try func.structFieldPtr(inst, extra.data.struct_operand, struct_ptr, struct_ty, extra.data.field_index);
func.finishAir(inst, result, &.{extra.data.struct_operand});
}
fn airStructFieldPtrIndex(func: *CodeGen, inst: Air.Inst.Index, index: u32) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
const struct_ptr = try func.resolveInst(ty_op.operand);
- const struct_ty = func.air.typeOf(ty_op.operand).childType();
+ const struct_ty = func.typeOf(ty_op.operand).childType(mod);
const result = try func.structFieldPtr(inst, ty_op.operand, struct_ptr, struct_ty, index);
func.finishAir(inst, result, &.{ty_op.operand});
@@ -3519,19 +3694,20 @@ fn structFieldPtr(
struct_ty: Type,
index: u32,
) InnerError!WValue {
- const result_ty = func.air.typeOfIndex(inst);
- const offset = switch (struct_ty.containerLayout()) {
- .Packed => switch (struct_ty.zigTypeTag()) {
+ const mod = func.bin_file.base.options.module.?;
+ const result_ty = func.typeOfIndex(inst);
+ const offset = switch (struct_ty.containerLayout(mod)) {
+ .Packed => switch (struct_ty.zigTypeTag(mod)) {
.Struct => offset: {
- if (result_ty.ptrInfo().data.host_size != 0) {
+ if (result_ty.ptrInfo(mod).host_size != 0) {
break :offset @as(u32, 0);
}
- break :offset struct_ty.packedStructFieldByteOffset(index, func.target);
+ break :offset struct_ty.packedStructFieldByteOffset(index, mod);
},
.Union => 0,
else => unreachable,
},
- else => struct_ty.structFieldOffset(index, func.target),
+ else => struct_ty.structFieldOffset(index, mod),
};
// save a load and store when we can simply reuse the operand
if (offset == 0) {
@@ -3546,23 +3722,23 @@ fn structFieldPtr(
}
fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
const struct_field = func.air.extraData(Air.StructField, ty_pl.payload).data;
- const struct_ty = func.air.typeOf(struct_field.struct_operand);
+ const struct_ty = func.typeOf(struct_field.struct_operand);
const operand = try func.resolveInst(struct_field.struct_operand);
const field_index = struct_field.field_index;
- const field_ty = struct_ty.structFieldType(field_index);
- if (!field_ty.hasRuntimeBitsIgnoreComptime()) return func.finishAir(inst, .none, &.{struct_field.struct_operand});
+ const field_ty = struct_ty.structFieldType(field_index, mod);
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) return func.finishAir(inst, .none, &.{struct_field.struct_operand});
- const result = switch (struct_ty.containerLayout()) {
- .Packed => switch (struct_ty.zigTypeTag()) {
+ const result = switch (struct_ty.containerLayout(mod)) {
+ .Packed => switch (struct_ty.zigTypeTag(mod)) {
.Struct => result: {
- const struct_obj = struct_ty.castTag(.@"struct").?.data;
- assert(struct_obj.layout == .Packed);
- const offset = struct_obj.packedFieldBitOffset(func.target, field_index);
+ const struct_obj = mod.typeToStruct(struct_ty).?;
+ const offset = struct_obj.packedFieldBitOffset(mod, field_index);
const backing_ty = struct_obj.backing_int_ty;
- const wasm_bits = toWasmBits(backing_ty.intInfo(func.target).bits) orelse {
+ const wasm_bits = toWasmBits(backing_ty.intInfo(mod).bits) orelse {
return func.fail("TODO: airStructFieldVal for packed structs larger than 128 bits", .{});
};
const const_wvalue = if (wasm_bits == 32)
@@ -3578,40 +3754,56 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
else
try func.binOp(operand, const_wvalue, backing_ty, .shr);
- if (field_ty.zigTypeTag() == .Float) {
- var payload: Type.Payload.Bits = .{
- .base = .{ .tag = .int_unsigned },
- .data = @intCast(u16, field_ty.bitSize(func.target)),
- };
- const int_type = Type.initPayload(&payload.base);
+ if (field_ty.zigTypeTag(mod) == .Float) {
+ const int_type = try mod.intType(.unsigned, @intCast(u16, field_ty.bitSize(mod)));
const truncated = try func.trunc(shifted_value, int_type, backing_ty);
const bitcasted = try func.bitcast(field_ty, int_type, truncated);
break :result try bitcasted.toLocal(func, field_ty);
- } else if (field_ty.isPtrAtRuntime() and struct_obj.fields.count() == 1) {
+ } else if (field_ty.isPtrAtRuntime(mod) and struct_obj.fields.count() == 1) {
// In this case we do not have to perform any transformations;
// we can simply reuse the operand.
break :result func.reuseOperand(struct_field.struct_operand, operand);
- } else if (field_ty.isPtrAtRuntime()) {
- var payload: Type.Payload.Bits = .{
- .base = .{ .tag = .int_unsigned },
- .data = @intCast(u16, field_ty.bitSize(func.target)),
- };
- const int_type = Type.initPayload(&payload.base);
+ } else if (field_ty.isPtrAtRuntime(mod)) {
+ const int_type = try mod.intType(.unsigned, @intCast(u16, field_ty.bitSize(mod)));
const truncated = try func.trunc(shifted_value, int_type, backing_ty);
break :result try truncated.toLocal(func, field_ty);
}
const truncated = try func.trunc(shifted_value, field_ty, backing_ty);
break :result try truncated.toLocal(func, field_ty);
},
- .Union => return func.fail("TODO: airStructFieldVal for packed unions", .{}),
+ .Union => result: {
+ if (isByRef(struct_ty, mod)) {
+ if (!isByRef(field_ty, mod)) {
+ const val = try func.load(operand, field_ty, 0);
+ break :result try val.toLocal(func, field_ty);
+ } else {
+ const new_stack_val = try func.allocStack(field_ty);
+ try func.store(new_stack_val, operand, field_ty, 0);
+ break :result new_stack_val;
+ }
+ }
+
+ const union_int_type = try mod.intType(.unsigned, @intCast(u16, struct_ty.bitSize(mod)));
+ if (field_ty.zigTypeTag(mod) == .Float) {
+ const int_type = try mod.intType(.unsigned, @intCast(u16, field_ty.bitSize(mod)));
+ const truncated = try func.trunc(operand, int_type, union_int_type);
+ const bitcasted = try func.bitcast(field_ty, int_type, truncated);
+ break :result try bitcasted.toLocal(func, field_ty);
+ } else if (field_ty.isPtrAtRuntime(mod)) {
+ const int_type = try mod.intType(.unsigned, @intCast(u16, field_ty.bitSize(mod)));
+ const truncated = try func.trunc(operand, int_type, union_int_type);
+ break :result try truncated.toLocal(func, field_ty);
+ }
+ const truncated = try func.trunc(operand, field_ty, union_int_type);
+ break :result try truncated.toLocal(func, field_ty);
+ },
else => unreachable,
},
else => result: {
- const offset = std.math.cast(u32, struct_ty.structFieldOffset(field_index, func.target)) orelse {
- const module = func.bin_file.base.options.module.?;
- return func.fail("Field type '{}' too big to fit into stack frame", .{field_ty.fmt(module)});
+ const offset = std.math.cast(u32, struct_ty.structFieldOffset(field_index, mod)) orelse {
+ return func.fail("Field type '{}' too big to fit into stack frame", .{field_ty.fmt(mod)});
};
- if (isByRef(field_ty, func.target)) {
+ if (isByRef(field_ty, mod)) {
switch (operand) {
.stack_offset => |stack_offset| {
break :result WValue{ .stack_offset = .{ .value = stack_offset.value + offset, .references = 1 } };
@@ -3628,11 +3820,12 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
// result type is always 'noreturn'
const blocktype = wasm.block_empty;
const pl_op = func.air.instructions.items(.data)[inst].pl_op;
const target = try func.resolveInst(pl_op.operand);
- const target_ty = func.air.typeOf(pl_op.operand);
+ const target_ty = func.typeOf(pl_op.operand);
const switch_br = func.air.extraData(Air.SwitchBr, pl_op.payload);
const liveness = try func.liveness.getSwitchBr(func.gpa, inst, switch_br.data.cases_len + 1);
defer func.gpa.free(liveness.deaths);
@@ -3661,7 +3854,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
errdefer func.gpa.free(values);
for (items, 0..) |ref, i| {
- const item_val = func.air.value(ref).?;
+ const item_val = (try func.air.value(ref, mod)).?;
const int_val = func.valueAsI32(item_val, target_ty);
if (lowest_maybe == null or int_val < lowest_maybe.?) {
lowest_maybe = int_val;
@@ -3684,7 +3877,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// When the target is an integer larger than u32, we have no way to use the value
// as an index; therefore we also use an if/else chain for those cases.
// TODO: Benchmark this to find a proper value, LLVM seems to draw the line at '40~45'.
- const is_sparse = highest - lowest > 50 or target_ty.bitSize(func.target) > 32;
+ const is_sparse = highest - lowest > 50 or target_ty.bitSize(mod) > 32;
const else_body = func.air.extra[extra_index..][0..switch_br.data.else_body_len];
const has_else_body = else_body.len != 0;
@@ -3729,7 +3922,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// for errors that are not present in any branch. This is fine as this default
// case will never be hit for those cases but we do save runtime cost and size
// by using a jump table for this instead of if-else chains.
- break :blk if (has_else_body or target_ty.zigTypeTag() == .ErrorSet) case_i else unreachable;
+ break :blk if (has_else_body or target_ty.zigTypeTag(mod) == .ErrorSet) case_i else unreachable;
};
func.mir_extra.appendAssumeCapacity(idx);
} else if (has_else_body) {
@@ -3740,10 +3933,10 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const signedness: std.builtin.Signedness = blk: {
// by default we assume the operand type is unsigned (i.e. bools and enum values)
- if (target_ty.zigTypeTag() != .Int) break :blk .unsigned;
+ if (target_ty.zigTypeTag(mod) != .Int) break :blk .unsigned;
// in case of an actual integer, we emit the correct signedness
- break :blk target_ty.intInfo(func.target).signedness;
+ break :blk target_ty.intInfo(mod).signedness;
};
try func.branches.ensureUnusedCapacity(func.gpa, case_list.items.len + @boolToInt(has_else_body));
@@ -3756,7 +3949,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const val = try func.lowerConstant(case.values[0].value, target_ty);
try func.emitWValue(val);
const opcode = buildOpcode(.{
- .valtype1 = typeToValtype(target_ty, func.target),
+ .valtype1 = typeToValtype(target_ty, mod),
.op = .ne, // not equal, because we want to jump out of this block if it does not match the condition.
.signedness = signedness,
});
@@ -3770,7 +3963,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const val = try func.lowerConstant(value.value, target_ty);
try func.emitWValue(val);
const opcode = buildOpcode(.{
- .valtype1 = typeToValtype(target_ty, func.target),
+ .valtype1 = typeToValtype(target_ty, mod),
.op = .eq,
.signedness = signedness,
});
@@ -3783,42 +3976,38 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
}
func.branches.appendAssumeCapacity(.{});
-
try func.currentBranch().values.ensureUnusedCapacity(func.gpa, liveness.deaths[index].len);
- for (liveness.deaths[index]) |operand| {
- func.processDeath(Air.indexToRef(operand));
+ defer {
+ var case_branch = func.branches.pop();
+ case_branch.deinit(func.gpa);
}
try func.genBody(case.body);
try func.endBlock();
- var case_branch = func.branches.pop();
- defer case_branch.deinit(func.gpa);
- try func.mergeBranch(&case_branch);
}
if (has_else_body) {
func.branches.appendAssumeCapacity(.{});
const else_deaths = liveness.deaths.len - 1;
try func.currentBranch().values.ensureUnusedCapacity(func.gpa, liveness.deaths[else_deaths].len);
- for (liveness.deaths[else_deaths]) |operand| {
- func.processDeath(Air.indexToRef(operand));
+ defer {
+ var else_branch = func.branches.pop();
+ else_branch.deinit(func.gpa);
}
try func.genBody(else_body);
try func.endBlock();
- var else_branch = func.branches.pop();
- defer else_branch.deinit(func.gpa);
- try func.mergeBranch(&else_branch);
}
func.finishAir(inst, .none, &.{});
}
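// NOTE (editor's sketch; assumes the elided setup subtracts `lowest` from
// the operand before br_table): a switch over 10, 11 and 13 builds a
// 4-entry jump table indexed by `value - 10`, where the unused slot for 12
// points at the else/default depth.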
fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const un_op = func.air.instructions.items(.data)[inst].un_op;
const operand = try func.resolveInst(un_op);
- const err_union_ty = func.air.typeOf(un_op);
- const pl_ty = err_union_ty.errorUnionPayload();
+ const err_union_ty = func.typeOf(un_op);
+ const pl_ty = err_union_ty.errorUnionPayload(mod);
const result = result: {
- if (err_union_ty.errorUnionSet().errorSetIsEmpty()) {
+ if (err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) {
switch (opcode) {
.i32_ne => break :result WValue{ .imm32 = 0 },
.i32_eq => break :result WValue{ .imm32 = 1 },
@@ -3827,10 +4016,10 @@ fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerErro
}
try func.emitWValue(operand);
- if (pl_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (pl_ty.hasRuntimeBitsIgnoreComptime(mod)) {
try func.addMemArg(.i32_load16_u, .{
- .offset = operand.offset() + @intCast(u32, errUnionErrorOffset(pl_ty, func.target)),
- .alignment = Type.anyerror.abiAlignment(func.target),
+ .offset = operand.offset() + @intCast(u32, errUnionErrorOffset(pl_ty, mod)),
+ .alignment = Type.anyerror.abiAlignment(mod),
});
}
@@ -3846,18 +4035,24 @@ fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerErro
}
fn airUnwrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
const operand = try func.resolveInst(ty_op.operand);
- const op_ty = func.air.typeOf(ty_op.operand);
- const err_ty = if (op_is_ptr) op_ty.childType() else op_ty;
- const payload_ty = err_ty.errorUnionPayload();
+ const op_ty = func.typeOf(ty_op.operand);
+ const err_ty = if (op_is_ptr) op_ty.childType(mod) else op_ty;
+ const payload_ty = err_ty.errorUnionPayload(mod);
const result = result: {
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) break :result WValue{ .none = {} };
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (op_is_ptr) {
+ break :result func.reuseOperand(ty_op.operand, operand);
+ }
+ break :result WValue{ .none = {} };
+ }
- const pl_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, func.target));
- if (op_is_ptr or isByRef(payload_ty, func.target)) {
+ const pl_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, mod));
+ if (op_is_ptr or isByRef(payload_ty, mod)) {
break :result try func.buildPointerOffset(operand, pl_offset, .new);
}
@@ -3868,48 +4063,50 @@ fn airUnwrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: boo
}
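// NOTE (editor's worked example, assuming errUnionPayloadOffset places the
// payload first when its alignment >= anyerror's 2-byte alignment): for
// `anyerror!u32` the payload sits at offset 0 and the u16 error at offset 4,
// so pl_offset above is 0 and the i32_load16_u in airIsErr reads offset 4.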
fn airUnwrapErrUnionError(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
const operand = try func.resolveInst(ty_op.operand);
- const op_ty = func.air.typeOf(ty_op.operand);
- const err_ty = if (op_is_ptr) op_ty.childType() else op_ty;
- const payload_ty = err_ty.errorUnionPayload();
+ const op_ty = func.typeOf(ty_op.operand);
+ const err_ty = if (op_is_ptr) op_ty.childType(mod) else op_ty;
+ const payload_ty = err_ty.errorUnionPayload(mod);
const result = result: {
- if (err_ty.errorUnionSet().errorSetIsEmpty()) {
+ if (err_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) {
break :result WValue{ .imm32 = 0 };
}
- if (op_is_ptr or !payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (op_is_ptr or !payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
break :result func.reuseOperand(ty_op.operand, operand);
}
- const error_val = try func.load(operand, Type.anyerror, @intCast(u32, errUnionErrorOffset(payload_ty, func.target)));
+ const error_val = try func.load(operand, Type.anyerror, @intCast(u32, errUnionErrorOffset(payload_ty, mod)));
break :result try error_val.toLocal(func, Type.anyerror);
};
func.finishAir(inst, result, &.{ty_op.operand});
}
fn airWrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
const operand = try func.resolveInst(ty_op.operand);
- const err_ty = func.air.typeOfIndex(inst);
+ const err_ty = func.typeOfIndex(inst);
- const pl_ty = func.air.typeOf(ty_op.operand);
+ const pl_ty = func.typeOf(ty_op.operand);
const result = result: {
- if (!pl_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) {
break :result func.reuseOperand(ty_op.operand, operand);
}
const err_union = try func.allocStack(err_ty);
- const payload_ptr = try func.buildPointerOffset(err_union, @intCast(u32, errUnionPayloadOffset(pl_ty, func.target)), .new);
+ const payload_ptr = try func.buildPointerOffset(err_union, @intCast(u32, errUnionPayloadOffset(pl_ty, mod)), .new);
try func.store(payload_ptr, operand, pl_ty, 0);
// ensure we also write '0' to the error part, so any present stack value gets overwritten by it.
try func.emitWValue(err_union);
try func.addImm32(0);
- const err_val_offset = @intCast(u32, errUnionErrorOffset(pl_ty, func.target));
+ const err_val_offset = @intCast(u32, errUnionErrorOffset(pl_ty, mod));
try func.addMemArg(.i32_store16, .{ .offset = err_union.offset() + err_val_offset, .alignment = 2 });
break :result err_union;
};
@@ -3917,25 +4114,26 @@ fn airWrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void
}
fn airWrapErrUnionErr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
const operand = try func.resolveInst(ty_op.operand);
const err_ty = func.air.getRefType(ty_op.ty);
- const pl_ty = err_ty.errorUnionPayload();
+ const pl_ty = err_ty.errorUnionPayload(mod);
const result = result: {
- if (!pl_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) {
break :result func.reuseOperand(ty_op.operand, operand);
}
const err_union = try func.allocStack(err_ty);
// store error value
- try func.store(err_union, operand, Type.anyerror, @intCast(u32, errUnionErrorOffset(pl_ty, func.target)));
+ try func.store(err_union, operand, Type.anyerror, @intCast(u32, errUnionErrorOffset(pl_ty, mod)));
// write 'undefined' to the payload
- const payload_ptr = try func.buildPointerOffset(err_union, @intCast(u32, errUnionPayloadOffset(pl_ty, func.target)), .new);
- const len = @intCast(u32, err_ty.errorUnionPayload().abiSize(func.target));
- try func.memset(payload_ptr, .{ .imm32 = len }, .{ .imm32 = 0xaaaaaaaa });
+ const payload_ptr = try func.buildPointerOffset(err_union, @intCast(u32, errUnionPayloadOffset(pl_ty, mod)), .new);
+ const len = @intCast(u32, err_ty.errorUnionPayload(mod).abiSize(mod));
+ try func.memset(Type.u8, payload_ptr, .{ .imm32 = len }, .{ .imm32 = 0xaa });
break :result err_union;
};
@@ -3947,15 +4145,22 @@ fn airIntcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty = func.air.getRefType(ty_op.ty);
const operand = try func.resolveInst(ty_op.operand);
- const operand_ty = func.air.typeOf(ty_op.operand);
- if (ty.zigTypeTag() == .Vector or operand_ty.zigTypeTag() == .Vector) {
+ const operand_ty = func.typeOf(ty_op.operand);
+ const mod = func.bin_file.base.options.module.?;
+ if (ty.zigTypeTag(mod) == .Vector or operand_ty.zigTypeTag(mod) == .Vector) {
return func.fail("todo Wasm intcast for vectors", .{});
}
- if (ty.abiSize(func.target) > 16 or operand_ty.abiSize(func.target) > 16) {
+ if (ty.abiSize(mod) > 16 or operand_ty.abiSize(mod) > 16) {
return func.fail("todo Wasm intcast for bitsize > 128", .{});
}
- const result = try (try func.intcast(operand, operand_ty, ty)).toLocal(func, ty);
+ const op_bits = toWasmBits(@intCast(u16, operand_ty.bitSize(mod))).?;
+ const wanted_bits = toWasmBits(@intCast(u16, ty.bitSize(mod))).?;
+ const result = if (op_bits == wanted_bits)
+ func.reuseOperand(ty_op.operand, operand)
+ else
+ try (try func.intcast(operand, operand_ty, ty)).toLocal(func, ty);
+
func.finishAir(inst, result, &.{});
}
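// NOTE (editor's example of the reuse above): widening u8 -> u16 maps both
// sides to 32 wasm bits, so `op_bits == wanted_bits` and the operand is
// reused without emitting any extend or wrap instruction.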
@@ -3964,8 +4169,9 @@ fn airIntcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
/// Asserts type's bitsize <= 128
/// NOTE: May leave the result on the top of the stack.
fn intcast(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError!WValue {
- const given_bitsize = @intCast(u16, given.bitSize(func.target));
- const wanted_bitsize = @intCast(u16, wanted.bitSize(func.target));
+ const mod = func.bin_file.base.options.module.?;
+ const given_bitsize = @intCast(u16, given.bitSize(mod));
+ const wanted_bitsize = @intCast(u16, wanted.bitSize(mod));
assert(given_bitsize <= 128);
assert(wanted_bitsize <= 128);
@@ -3978,7 +4184,7 @@ fn intcast(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro
try func.addTag(.i32_wrap_i64);
} else if (op_bits == 32 and wanted_bits > 32 and wanted_bits <= 64) {
try func.emitWValue(operand);
- try func.addTag(if (wanted.isSignedInt()) .i64_extend_i32_s else .i64_extend_i32_u);
+ try func.addTag(if (wanted.isSignedInt(mod)) .i64_extend_i32_s else .i64_extend_i32_u);
} else if (wanted_bits == 128) {
// for 128bit integers we store the integer in the virtual stack, rather than a local
const stack_ptr = try func.allocStack(wanted);
@@ -3987,14 +4193,14 @@ fn intcast(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro
// for 32-bit integers, we first coerce the value into a 64-bit integer before storing it,
// meaning fewer store operations are required.
const lhs = if (op_bits == 32) blk: {
- break :blk try func.intcast(operand, given, if (wanted.isSignedInt()) Type.i64 else Type.u64);
+ break :blk try func.intcast(operand, given, if (wanted.isSignedInt(mod)) Type.i64 else Type.u64);
} else operand;
// store the value's low 64 bits at offset 0 (wasm memory is little-endian)
try func.store(.{ .stack = {} }, lhs, Type.u64, 0 + stack_ptr.offset());
// For signed integers we shift the value right by 63 (64-bit integer - 1 sign bit) and store the resulting sign word in the upper 64 bits
- if (wanted.isSignedInt()) {
+ if (wanted.isSignedInt(mod)) {
try func.emitWValue(stack_ptr);
const shr = try func.binOp(lhs, .{ .imm64 = 63 }, Type.i64, .shr);
try func.store(.{ .stack = {} }, shr, Type.u64, 8 + stack_ptr.offset());
@@ -4009,11 +4215,12 @@ fn intcast(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro
}
fn airIsNull(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode, op_kind: enum { value, ptr }) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const un_op = func.air.instructions.items(.data)[inst].un_op;
const operand = try func.resolveInst(un_op);
- const op_ty = func.air.typeOf(un_op);
- const optional_ty = if (op_kind == .ptr) op_ty.childType() else op_ty;
+ const op_ty = func.typeOf(un_op);
+ const optional_ty = if (op_kind == .ptr) op_ty.childType(mod) else op_ty;
const is_null = try func.isNull(operand, optional_ty, opcode);
const result = try is_null.toLocal(func, optional_ty);
func.finishAir(inst, result, &.{un_op});
@@ -4022,20 +4229,19 @@ fn airIsNull(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode, op_kind:
/// For a given type and operand, checks if it's considered `null`.
/// NOTE: Leaves the result on the stack
fn isNull(func: *CodeGen, operand: WValue, optional_ty: Type, opcode: wasm.Opcode) InnerError!WValue {
+ const mod = func.bin_file.base.options.module.?;
try func.emitWValue(operand);
- var buf: Type.Payload.ElemType = undefined;
- const payload_ty = optional_ty.optionalChild(&buf);
- if (!optional_ty.optionalReprIsPayload()) {
+ const payload_ty = optional_ty.optionalChild(mod);
+ if (!optional_ty.optionalReprIsPayload(mod)) {
// When payload is zero-bits, we can treat operand as a value, rather than
// a pointer to the stack value
- if (payload_ty.hasRuntimeBitsIgnoreComptime()) {
- const offset = std.math.cast(u32, payload_ty.abiSize(func.target)) orelse {
- const module = func.bin_file.base.options.module.?;
- return func.fail("Optional type {} too big to fit into stack frame", .{optional_ty.fmt(module)});
+ if (payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ const offset = std.math.cast(u32, payload_ty.abiSize(mod)) orelse {
+ return func.fail("Optional type {} too big to fit into stack frame", .{optional_ty.fmt(mod)});
};
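// the null flag is a single byte stored directly after the payload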
try func.addMemArg(.i32_load8_u, .{ .offset = operand.offset() + offset, .alignment = 1 });
}
- } else if (payload_ty.isSlice()) {
+ } else if (payload_ty.isSlice(mod)) {
switch (func.arch()) {
.wasm32 => try func.addMemArg(.i32_load, .{ .offset = operand.offset(), .alignment = 4 }),
.wasm64 => try func.addMemArg(.i64_load, .{ .offset = operand.offset(), .alignment = 8 }),
@@ -4051,18 +4257,19 @@ fn isNull(func: *CodeGen, operand: WValue, optional_ty: Type, opcode: wasm.Opcod
}
fn airOptionalPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
- const opt_ty = func.air.typeOf(ty_op.operand);
- const payload_ty = func.air.typeOfIndex(inst);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ const opt_ty = func.typeOf(ty_op.operand);
+ const payload_ty = func.typeOfIndex(inst);
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return func.finishAir(inst, .none, &.{ty_op.operand});
}
const result = result: {
const operand = try func.resolveInst(ty_op.operand);
- if (opt_ty.optionalReprIsPayload()) break :result func.reuseOperand(ty_op.operand, operand);
+ if (opt_ty.optionalReprIsPayload(mod)) break :result func.reuseOperand(ty_op.operand, operand);
- if (isByRef(payload_ty, func.target)) {
+ if (isByRef(payload_ty, mod)) {
break :result try func.buildPointerOffset(operand, 0, .new);
}
@@ -4073,14 +4280,14 @@ fn airOptionalPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airOptionalPayloadPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
const operand = try func.resolveInst(ty_op.operand);
- const opt_ty = func.air.typeOf(ty_op.operand).childType();
+ const opt_ty = func.typeOf(ty_op.operand).childType(mod);
const result = result: {
- var buf: Type.Payload.ElemType = undefined;
- const payload_ty = opt_ty.optionalChild(&buf);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime() or opt_ty.optionalReprIsPayload()) {
+ const payload_ty = opt_ty.optionalChild(mod);
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod) or opt_ty.optionalReprIsPayload(mod)) {
break :result func.reuseOperand(ty_op.operand, operand);
}
@@ -4090,22 +4297,21 @@ fn airOptionalPayloadPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airOptionalPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
const operand = try func.resolveInst(ty_op.operand);
- const opt_ty = func.air.typeOf(ty_op.operand).childType();
- var buf: Type.Payload.ElemType = undefined;
- const payload_ty = opt_ty.optionalChild(&buf);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ const opt_ty = func.typeOf(ty_op.operand).childType(mod);
+ const payload_ty = opt_ty.optionalChild(mod);
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return func.fail("TODO: Implement OptionalPayloadPtrSet for optional with zero-sized type {}", .{payload_ty.fmtDebug()});
}
- if (opt_ty.optionalReprIsPayload()) {
+ if (opt_ty.optionalReprIsPayload(mod)) {
return func.finishAir(inst, operand, &.{ty_op.operand});
}
- const offset = std.math.cast(u32, payload_ty.abiSize(func.target)) orelse {
- const module = func.bin_file.base.options.module.?;
- return func.fail("Optional type {} too big to fit into stack frame", .{opt_ty.fmt(module)});
+ const offset = std.math.cast(u32, payload_ty.abiSize(mod)) orelse {
+ return func.fail("Optional type {} too big to fit into stack frame", .{opt_ty.fmt(mod)});
};
try func.emitWValue(operand);
@@ -4118,11 +4324,12 @@ fn airOptionalPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!voi
fn airWrapOptional(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
- const payload_ty = func.air.typeOf(ty_op.operand);
+ const payload_ty = func.typeOf(ty_op.operand);
+ const mod = func.bin_file.base.options.module.?;
const result = result: {
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
- const non_null_bit = try func.allocStack(Type.initTag(.u1));
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ const non_null_bit = try func.allocStack(Type.u1);
try func.emitWValue(non_null_bit);
try func.addImm32(1);
try func.addMemArg(.i32_store8, .{ .offset = non_null_bit.offset(), .alignment = 1 });
@@ -4130,13 +4337,12 @@ fn airWrapOptional(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
const operand = try func.resolveInst(ty_op.operand);
- const op_ty = func.air.typeOfIndex(inst);
- if (op_ty.optionalReprIsPayload()) {
+ const op_ty = func.typeOfIndex(inst);
+ if (op_ty.optionalReprIsPayload(mod)) {
break :result func.reuseOperand(ty_op.operand, operand);
}
- const offset = std.math.cast(u32, payload_ty.abiSize(func.target)) orelse {
- const module = func.bin_file.base.options.module.?;
- return func.fail("Optional type {} too big to fit into stack frame", .{op_ty.fmt(module)});
+ const offset = std.math.cast(u32, payload_ty.abiSize(mod)) orelse {
+ return func.fail("Optional type {} too big to fit into stack frame", .{op_ty.fmt(mod)});
};
// Create optional type, set the non-null bit, and store the operand inside the optional type
@@ -4159,7 +4365,7 @@ fn airSlice(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const lhs = try func.resolveInst(bin_op.lhs);
const rhs = try func.resolveInst(bin_op.rhs);
- const slice_ty = func.air.typeOfIndex(inst);
+ const slice_ty = func.typeOfIndex(inst);
const slice = try func.allocStack(slice_ty);
try func.store(slice, lhs, Type.usize, 0);
@@ -4176,13 +4382,14 @@ fn airSliceLen(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airSliceElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const bin_op = func.air.instructions.items(.data)[inst].bin_op;
- const slice_ty = func.air.typeOf(bin_op.lhs);
+ const slice_ty = func.typeOf(bin_op.lhs);
const slice = try func.resolveInst(bin_op.lhs);
const index = try func.resolveInst(bin_op.rhs);
- const elem_ty = slice_ty.childType();
- const elem_size = elem_ty.abiSize(func.target);
+ const elem_ty = slice_ty.childType(mod);
+ const elem_size = elem_ty.abiSize(mod);
// load pointer onto stack
_ = try func.load(slice, Type.usize, 0);
@@ -4196,7 +4403,7 @@ fn airSliceElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const result_ptr = try func.allocLocal(Type.usize);
try func.addLabel(.local_set, result_ptr.local.value);
- const result = if (!isByRef(elem_ty, func.target)) result: {
+ const result = if (!isByRef(elem_ty, mod)) result: {
const elem_val = try func.load(result_ptr, elem_ty, 0);
break :result try elem_val.toLocal(func, elem_ty);
} else result_ptr;
@@ -4205,11 +4412,12 @@ fn airSliceElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airSliceElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data;
- const elem_ty = func.air.getRefType(ty_pl.ty).childType();
- const elem_size = elem_ty.abiSize(func.target);
+ const elem_ty = func.air.getRefType(ty_pl.ty).childType(mod);
+ const elem_size = elem_ty.abiSize(mod);
const slice = try func.resolveInst(bin_op.lhs);
const index = try func.resolveInst(bin_op.rhs);
@@ -4248,7 +4456,7 @@ fn airTrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const operand = try func.resolveInst(ty_op.operand);
const wanted_ty = func.air.getRefType(ty_op.ty);
- const op_ty = func.air.typeOf(ty_op.operand);
+ const op_ty = func.typeOf(ty_op.operand);
const result = try func.trunc(operand, wanted_ty, op_ty);
func.finishAir(inst, try result.toLocal(func, wanted_ty), &.{ty_op.operand});
@@ -4257,13 +4465,14 @@ fn airTrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
/// Truncates a given operand to a given type, discarding any overflown bits.
/// NOTE: Resulting value is left on the stack.
fn trunc(func: *CodeGen, operand: WValue, wanted_ty: Type, given_ty: Type) InnerError!WValue {
- const given_bits = @intCast(u16, given_ty.bitSize(func.target));
+ const mod = func.bin_file.base.options.module.?;
+ const given_bits = @intCast(u16, given_ty.bitSize(mod));
if (toWasmBits(given_bits) == null) {
return func.fail("TODO: Implement wasm integer truncation for integer bitsize: {d}", .{given_bits});
}
var result = try func.intcast(operand, given_ty, wanted_ty);
- const wanted_bits = @intCast(u16, wanted_ty.bitSize(func.target));
+ const wanted_bits = @intCast(u16, wanted_ty.bitSize(mod));
const wasm_bits = toWasmBits(wanted_bits).?;
if (wasm_bits != wanted_bits) {
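// the wasm local is wider than the wanted type, so mask off the excess upper bits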
result = try func.wrapOperand(result, wanted_ty);
@@ -4280,32 +4489,34 @@ fn airBoolToInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airArrayToSlice(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
const operand = try func.resolveInst(ty_op.operand);
- const array_ty = func.air.typeOf(ty_op.operand).childType();
+ const array_ty = func.typeOf(ty_op.operand).childType(mod);
const slice_ty = func.air.getRefType(ty_op.ty);
// create a slice on the stack
const slice_local = try func.allocStack(slice_ty);
// store the array ptr in the slice
- if (array_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (array_ty.hasRuntimeBitsIgnoreComptime(mod)) {
try func.store(slice_local, operand, Type.usize, 0);
}
// store the length of the array in the slice
- const len = WValue{ .imm32 = @intCast(u32, array_ty.arrayLen()) };
+ const len = WValue{ .imm32 = @intCast(u32, array_ty.arrayLen(mod)) };
try func.store(slice_local, len, Type.usize, func.ptrSize());
func.finishAir(inst, slice_local, &.{ty_op.operand});
}
fn airPtrToInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const un_op = func.air.instructions.items(.data)[inst].un_op;
const operand = try func.resolveInst(un_op);
- const ptr_ty = func.air.typeOf(un_op);
- const result = if (ptr_ty.isSlice())
+ const ptr_ty = func.typeOf(un_op);
+ const result = if (ptr_ty.isSlice(mod))
try func.slicePtr(operand)
else switch (operand) {
// for stack offset, return a pointer to this offset.
@@ -4316,16 +4527,17 @@ fn airPtrToInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airPtrElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const bin_op = func.air.instructions.items(.data)[inst].bin_op;
- const ptr_ty = func.air.typeOf(bin_op.lhs);
+ const ptr_ty = func.typeOf(bin_op.lhs);
const ptr = try func.resolveInst(bin_op.lhs);
const index = try func.resolveInst(bin_op.rhs);
- const elem_ty = ptr_ty.childType();
- const elem_size = elem_ty.abiSize(func.target);
+ const elem_ty = ptr_ty.childType(mod);
+ const elem_size = elem_ty.abiSize(mod);
// load pointer onto the stack
- if (ptr_ty.isSlice()) {
+ if (ptr_ty.isSlice(mod)) {
_ = try func.load(ptr, Type.usize, 0);
} else {
try func.lowerToStack(ptr);
@@ -4338,9 +4550,9 @@ fn airPtrElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.addTag(.i32_add);
const elem_result = val: {
- var result = try func.allocLocal(elem_ty);
+ var result = try func.allocLocal(Type.usize);
try func.addLabel(.local_set, result.local.value);
- if (isByRef(elem_ty, func.target)) {
+ if (isByRef(elem_ty, mod)) {
break :val result;
}
defer result.free(func); // only free if it's not returned like above
@@ -4352,18 +4564,19 @@ fn airPtrElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airPtrElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data;
- const ptr_ty = func.air.typeOf(bin_op.lhs);
- const elem_ty = func.air.getRefType(ty_pl.ty).childType();
- const elem_size = elem_ty.abiSize(func.target);
+ const ptr_ty = func.typeOf(bin_op.lhs);
+ const elem_ty = func.air.getRefType(ty_pl.ty).childType(mod);
+ const elem_size = elem_ty.abiSize(mod);
const ptr = try func.resolveInst(bin_op.lhs);
const index = try func.resolveInst(bin_op.rhs);
// load pointer onto the stack
- if (ptr_ty.isSlice()) {
+ if (ptr_ty.isSlice(mod)) {
_ = try func.load(ptr, Type.usize, 0);
} else {
try func.lowerToStack(ptr);
@@ -4381,24 +4594,25 @@ fn airPtrElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airPtrBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data;
const ptr = try func.resolveInst(bin_op.lhs);
const offset = try func.resolveInst(bin_op.rhs);
- const ptr_ty = func.air.typeOf(bin_op.lhs);
- const pointee_ty = switch (ptr_ty.ptrSize()) {
- .One => ptr_ty.childType().childType(), // ptr to array, so get array element type
- else => ptr_ty.childType(),
+ const ptr_ty = func.typeOf(bin_op.lhs);
+ const pointee_ty = switch (ptr_ty.ptrSize(mod)) {
+ .One => ptr_ty.childType(mod).childType(mod), // ptr to array, so get array element type
+ else => ptr_ty.childType(mod),
};
- const valtype = typeToValtype(Type.usize, func.target);
+ const valtype = typeToValtype(Type.usize, mod);
const mul_opcode = buildOpcode(.{ .valtype1 = valtype, .op = .mul });
const bin_opcode = buildOpcode(.{ .valtype1 = valtype, .op = op });
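// scale the element offset by the pointee's ABI size before applying it to the pointer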
try func.lowerToStack(ptr);
try func.emitWValue(offset);
- try func.addImm32(@bitCast(i32, @intCast(u32, pointee_ty.abiSize(func.target))));
+ try func.addImm32(@bitCast(i32, @intCast(u32, pointee_ty.abiSize(mod))));
try func.addTag(Mir.Inst.Tag.fromOpcode(mul_opcode));
try func.addTag(Mir.Inst.Tag.fromOpcode(bin_opcode));
@@ -4408,6 +4622,7 @@ fn airPtrBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
}
fn airMemset(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
if (safety) {
// TODO if the value is undef, write 0xaa bytes to dest
} else {
@@ -4416,14 +4631,21 @@ fn airMemset(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void
const bin_op = func.air.instructions.items(.data)[inst].bin_op;
const ptr = try func.resolveInst(bin_op.lhs);
- const ptr_ty = func.air.typeOf(bin_op.lhs);
+ const ptr_ty = func.typeOf(bin_op.lhs);
const value = try func.resolveInst(bin_op.rhs);
- const len = switch (ptr_ty.ptrSize()) {
+ const len = switch (ptr_ty.ptrSize(mod)) {
.Slice => try func.sliceLen(ptr),
- .One => @as(WValue, .{ .imm32 = @intCast(u32, ptr_ty.childType().arrayLen()) }),
+ .One => @as(WValue, .{ .imm32 = @intCast(u32, ptr_ty.childType(mod).arrayLen(mod)) }),
.C, .Many => unreachable,
};
- try func.memset(ptr, len, value);
+
+ const elem_ty = if (ptr_ty.ptrSize(mod) == .One)
+ ptr_ty.childType(mod).childType(mod)
+ else
+ ptr_ty.childType(mod);
+
+ const dst_ptr = try func.sliceOrArrayPtr(ptr, ptr_ty);
+ try func.memset(elem_ty, dst_ptr, len, value);
func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
}
@@ -4432,10 +4654,13 @@ fn airMemset(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void
/// When the user has enabled the bulk_memory feature and each element is a
/// single byte, we lower this to wasm's memset instruction. Otherwise,
/// we lower it to a manual loop.
-fn memset(func: *CodeGen, ptr: WValue, len: WValue, value: WValue) InnerError!void {
+fn memset(func: *CodeGen, elem_ty: Type, ptr: WValue, len: WValue, value: WValue) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
+ const abi_size = @intCast(u32, elem_ty.abiSize(mod));
+
// When bulk_memory is enabled, we lower it to wasm's memset instruction.
- // If not, we lower it ourselves
- if (std.Target.wasm.featureSetHas(func.target.cpu.features, .bulk_memory)) {
+ // If not, we lower it ourselves.
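+ // The `memory.fill` instruction repeats a single byte, so this fast path
+ // only applies when each element is exactly one byte wide.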
+ if (std.Target.wasm.featureSetHas(func.target.cpu.features, .bulk_memory) and abi_size == 1) {
try func.lowerToStack(ptr);
try func.emitWValue(value);
try func.emitWValue(len);
@@ -4443,101 +4668,107 @@ fn memset(func: *CodeGen, ptr: WValue, len: WValue, value: WValue) InnerError!vo
return;
}
- // When the length is comptime-known we do the loop at codegen, rather
- // than emitting a runtime loop into the binary
- switch (len) {
- .imm32, .imm64 => {
- const length = switch (len) {
- .imm32 => |val| val,
- .imm64 => |val| val,
- else => unreachable,
- };
-
- var offset: u32 = 0;
- const base = ptr.offset();
- while (offset < length) : (offset += 1) {
- try func.emitWValue(ptr);
- try func.emitWValue(value);
- switch (func.arch()) {
- .wasm32 => {
- try func.addMemArg(.i32_store8, .{ .offset = base + offset, .alignment = 1 });
- },
- .wasm64 => {
- try func.addMemArg(.i64_store8, .{ .offset = base + offset, .alignment = 1 });
- },
- else => unreachable,
- }
- }
- },
- else => {
- // TODO: We should probably lower this to a call to compiler_rt
- // But for now, we implement it manually
- const offset = try func.ensureAllocLocal(Type.usize); // local for counter
- // outer block to jump to when loop is done
- try func.startBlock(.block, wasm.block_empty);
- try func.startBlock(.loop, wasm.block_empty);
- try func.emitWValue(offset);
+ const final_len = switch (len) {
+ .imm32 => |val| WValue{ .imm32 = val * abi_size },
+ .imm64 => |val| WValue{ .imm64 = val * abi_size },
+ else => if (abi_size != 1) blk: {
+ const new_len = try func.ensureAllocLocal(Type.usize);
try func.emitWValue(len);
switch (func.arch()) {
- .wasm32 => try func.addTag(.i32_eq),
- .wasm64 => try func.addTag(.i64_eq),
- else => unreachable,
- }
- try func.addLabel(.br_if, 1); // jump out of loop into outer block (finished)
- try func.emitWValue(ptr);
- try func.emitWValue(offset);
- switch (func.arch()) {
- .wasm32 => try func.addTag(.i32_add),
- .wasm64 => try func.addTag(.i64_add),
- else => unreachable,
- }
- try func.emitWValue(value);
- const mem_store_op: Mir.Inst.Tag = switch (func.arch()) {
- .wasm32 => .i32_store8,
- .wasm64 => .i64_store8,
- else => unreachable,
- };
- try func.addMemArg(mem_store_op, .{ .offset = ptr.offset(), .alignment = 1 });
- try func.emitWValue(offset);
- try func.addImm32(1);
- switch (func.arch()) {
- .wasm32 => try func.addTag(.i32_add),
- .wasm64 => try func.addTag(.i64_add),
+ .wasm32 => {
+ try func.emitWValue(.{ .imm32 = abi_size });
+ try func.addTag(.i32_mul);
+ },
+ .wasm64 => {
+ try func.emitWValue(.{ .imm64 = abi_size });
+ try func.addTag(.i64_mul);
+ },
else => unreachable,
}
- try func.addLabel(.local_set, offset.local.value);
- try func.addLabel(.br, 0); // jump to start of loop
- try func.endBlock();
- try func.endBlock();
+ try func.addLabel(.local_set, new_len.local.value);
+ break :blk new_len;
+ } else len,
+ };
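+ // `final_len` now holds the length in bytes (element count * element ABI size)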
+
+ var end_ptr = try func.allocLocal(Type.usize);
+ defer end_ptr.free(func);
+ var new_ptr = try func.buildPointerOffset(ptr, 0, .new);
+ defer new_ptr.free(func);
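+ // The generated loop is roughly:
+ //   end = ptr + final_len
+ //   cur = ptr
+ //   while (cur != end) { store(cur, value); cur += abi_size; }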
+
+ // compute the end address: the loop terminates once the current pointer reaches it
+ try func.lowerToStack(ptr);
+ try func.emitWValue(final_len);
+ switch (func.arch()) {
+ .wasm32 => try func.addTag(.i32_add),
+ .wasm64 => try func.addTag(.i64_add),
+ else => unreachable,
+ }
+ try func.addLabel(.local_set, end_ptr.local.value);
+
+ // outer block to jump to when loop is done
+ try func.startBlock(.block, wasm.block_empty);
+ try func.startBlock(.loop, wasm.block_empty);
+
+ // check the loop-end condition
+ try func.emitWValue(new_ptr);
+ try func.emitWValue(end_ptr);
+ switch (func.arch()) {
+ .wasm32 => try func.addTag(.i32_eq),
+ .wasm64 => try func.addTag(.i64_eq),
+ else => unreachable,
+ }
+ try func.addLabel(.br_if, 1); // jump out of loop into outer block (finished)
+
+ // store the value at the current position of the pointer
+ try func.store(new_ptr, value, elem_ty, 0);
+
+ // move the pointer to the next element
+ try func.emitWValue(new_ptr);
+ switch (func.arch()) {
+ .wasm32 => {
+ try func.emitWValue(.{ .imm32 = abi_size });
+ try func.addTag(.i32_add);
+ },
+ .wasm64 => {
+ try func.emitWValue(.{ .imm64 = abi_size });
+ try func.addTag(.i64_add);
},
+ else => unreachable,
}
+ try func.addLabel(.local_set, new_ptr.local.value);
+
+ // end of loop
+ try func.addLabel(.br, 0); // jump to start of loop
+ try func.endBlock();
+ try func.endBlock();
}
fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const bin_op = func.air.instructions.items(.data)[inst].bin_op;
- const array_ty = func.air.typeOf(bin_op.lhs);
+ const array_ty = func.typeOf(bin_op.lhs);
const array = try func.resolveInst(bin_op.lhs);
const index = try func.resolveInst(bin_op.rhs);
- const elem_ty = array_ty.childType();
- const elem_size = elem_ty.abiSize(func.target);
+ const elem_ty = array_ty.childType(mod);
+ const elem_size = elem_ty.abiSize(mod);
- if (isByRef(array_ty, func.target)) {
+ if (isByRef(array_ty, mod)) {
try func.lowerToStack(array);
try func.emitWValue(index);
try func.addImm32(@bitCast(i32, @intCast(u32, elem_size)));
try func.addTag(.i32_mul);
try func.addTag(.i32_add);
} else {
- std.debug.assert(array_ty.zigTypeTag() == .Vector);
+ std.debug.assert(array_ty.zigTypeTag(mod) == .Vector);
switch (index) {
inline .imm32, .imm64 => |lane| {
- const opcode: wasm.SimdOpcode = switch (elem_ty.bitSize(func.target)) {
- 8 => if (elem_ty.isSignedInt()) .i8x16_extract_lane_s else .i8x16_extract_lane_u,
- 16 => if (elem_ty.isSignedInt()) .i16x8_extract_lane_s else .i16x8_extract_lane_u,
- 32 => if (elem_ty.isInt()) .i32x4_extract_lane else .f32x4_extract_lane,
- 64 => if (elem_ty.isInt()) .i64x2_extract_lane else .f64x2_extract_lane,
+ const opcode: wasm.SimdOpcode = switch (elem_ty.bitSize(mod)) {
+ 8 => if (elem_ty.isSignedInt(mod)) .i8x16_extract_lane_s else .i8x16_extract_lane_u,
+ 16 => if (elem_ty.isSignedInt(mod)) .i16x8_extract_lane_s else .i16x8_extract_lane_u,
+ 32 => if (elem_ty.isInt(mod)) .i32x4_extract_lane else .f32x4_extract_lane,
+ 64 => if (elem_ty.isInt(mod)) .i64x2_extract_lane else .f64x2_extract_lane,
else => unreachable,
};
@@ -4569,7 +4800,7 @@ fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
var result = try func.allocLocal(Type.usize);
try func.addLabel(.local_set, result.local.value);
- if (isByRef(elem_ty, func.target)) {
+ if (isByRef(elem_ty, mod)) {
break :val result;
}
defer result.free(func); // only free if no longer needed and not returned like above
@@ -4582,22 +4813,23 @@ fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airFloatToInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
const operand = try func.resolveInst(ty_op.operand);
- const dest_ty = func.air.typeOfIndex(inst);
- const op_ty = func.air.typeOf(ty_op.operand);
+ const dest_ty = func.typeOfIndex(inst);
+ const op_ty = func.typeOf(ty_op.operand);
- if (op_ty.abiSize(func.target) > 8) {
+ if (op_ty.abiSize(mod) > 8) {
return func.fail("TODO: floatToInt for integers/floats with bitsize larger than 64 bits", .{});
}
try func.emitWValue(operand);
const op = buildOpcode(.{
.op = .trunc,
- .valtype1 = typeToValtype(dest_ty, func.target),
- .valtype2 = typeToValtype(op_ty, func.target),
- .signedness = if (dest_ty.isSignedInt()) .signed else .unsigned,
+ .valtype1 = typeToValtype(dest_ty, mod),
+ .valtype2 = typeToValtype(op_ty, mod),
+ .signedness = if (dest_ty.isSignedInt(mod)) .signed else .unsigned,
});
try func.addTag(Mir.Inst.Tag.fromOpcode(op));
const wrapped = try func.wrapOperand(.{ .stack = {} }, dest_ty);
@@ -4606,22 +4838,23 @@ fn airFloatToInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airIntToFloat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
const operand = try func.resolveInst(ty_op.operand);
- const dest_ty = func.air.typeOfIndex(inst);
- const op_ty = func.air.typeOf(ty_op.operand);
+ const dest_ty = func.typeOfIndex(inst);
+ const op_ty = func.typeOf(ty_op.operand);
- if (op_ty.abiSize(func.target) > 8) {
+ if (op_ty.abiSize(mod) > 8) {
return func.fail("TODO: intToFloat for integers/floats with bitsize larger than 64 bits", .{});
}
try func.emitWValue(operand);
const op = buildOpcode(.{
.op = .convert,
- .valtype1 = typeToValtype(dest_ty, func.target),
- .valtype2 = typeToValtype(op_ty, func.target),
- .signedness = if (op_ty.isSignedInt()) .signed else .unsigned,
+ .valtype1 = typeToValtype(dest_ty, mod),
+ .valtype2 = typeToValtype(op_ty, mod),
+ .signedness = if (op_ty.isSignedInt(mod)) .signed else .unsigned,
});
try func.addTag(Mir.Inst.Tag.fromOpcode(op));
@@ -4631,18 +4864,19 @@ fn airIntToFloat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
const operand = try func.resolveInst(ty_op.operand);
- const ty = func.air.typeOfIndex(inst);
- const elem_ty = ty.childType();
+ const ty = func.typeOfIndex(inst);
+ const elem_ty = ty.childType(mod);
- if (determineSimdStoreStrategy(ty, func.target) == .direct) blk: {
+ if (determineSimdStoreStrategy(ty, mod) == .direct) blk: {
switch (operand) {
// when the operand lives in the linear memory section, we can directly
// load and splat the value at once, meaning we do not first have to load
// the scalar value onto the stack.
.stack_offset, .memory, .memory_offset => {
- const opcode = switch (elem_ty.bitSize(func.target)) {
+ const opcode = switch (elem_ty.bitSize(mod)) {
8 => std.wasm.simdOpcode(.v128_load8_splat),
16 => std.wasm.simdOpcode(.v128_load16_splat),
32 => std.wasm.simdOpcode(.v128_load32_splat),
@@ -4657,18 +4891,18 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.mir_extra.appendSlice(func.gpa, &[_]u32{
opcode,
operand.offset(),
- elem_ty.abiAlignment(func.target),
+ elem_ty.abiAlignment(mod),
});
try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
try func.addLabel(.local_set, result.local.value);
return func.finishAir(inst, result, &.{ty_op.operand});
},
.local => {
- const opcode = switch (elem_ty.bitSize(func.target)) {
+ const opcode = switch (elem_ty.bitSize(mod)) {
8 => std.wasm.simdOpcode(.i8x16_splat),
16 => std.wasm.simdOpcode(.i16x8_splat),
- 32 => if (elem_ty.isInt()) std.wasm.simdOpcode(.i32x4_splat) else std.wasm.simdOpcode(.f32x4_splat),
- 64 => if (elem_ty.isInt()) std.wasm.simdOpcode(.i64x2_splat) else std.wasm.simdOpcode(.f64x2_splat),
+ 32 => if (elem_ty.isInt(mod)) std.wasm.simdOpcode(.i32x4_splat) else std.wasm.simdOpcode(.f32x4_splat),
+ 64 => if (elem_ty.isInt(mod)) std.wasm.simdOpcode(.i64x2_splat) else std.wasm.simdOpcode(.f64x2_splat),
else => break :blk, // Cannot make use of simd-instructions
};
const result = try func.allocLocal(ty);
@@ -4682,14 +4916,14 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
else => unreachable,
}
}
- const elem_size = elem_ty.bitSize(func.target);
- const vector_len = @intCast(usize, ty.vectorLen());
+ const elem_size = elem_ty.bitSize(mod);
+ const vector_len = @intCast(usize, ty.vectorLen(mod));
if ((!std.math.isPowerOfTwo(elem_size) or elem_size % 8 != 0) and vector_len > 1) {
return func.fail("TODO: WebAssembly `@splat` for arbitrary element bitsize {d}", .{elem_size});
}
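// fallback: build the vector in linear memory, storing the scalar once per lane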
const result = try func.allocStack(ty);
- const elem_byte_size = @intCast(u32, elem_ty.abiSize(func.target));
+ const elem_byte_size = @intCast(u32, elem_ty.abiSize(mod));
var index: usize = 0;
var offset: u32 = 0;
while (index < vector_len) : (index += 1) {
@@ -4709,26 +4943,25 @@ fn airSelect(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const inst_ty = func.air.typeOfIndex(inst);
+ const mod = func.bin_file.base.options.module.?;
+ const inst_ty = func.typeOfIndex(inst);
const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
const extra = func.air.extraData(Air.Shuffle, ty_pl.payload).data;
const a = try func.resolveInst(extra.a);
const b = try func.resolveInst(extra.b);
- const mask = func.air.values[extra.mask];
+ const mask = extra.mask.toValue();
const mask_len = extra.mask_len;
- const child_ty = inst_ty.childType();
- const elem_size = child_ty.abiSize(func.target);
+ const child_ty = inst_ty.childType(mod);
+ const elem_size = child_ty.abiSize(mod);
- const module = func.bin_file.base.options.module.?;
// TODO: One of them could be by ref; handle in loop
- if (isByRef(func.air.typeOf(extra.a), func.target) or isByRef(inst_ty, func.target)) {
+ if (isByRef(func.typeOf(extra.a), mod) or isByRef(inst_ty, mod)) {
const result = try func.allocStack(inst_ty);
for (0..mask_len) |index| {
- var buf: Value.ElemValueBuffer = undefined;
- const value = mask.elemValueBuffer(module, index, &buf).toSignedInt(func.target);
+ const value = (try mask.elemValue(mod, index)).toSignedInt(mod);
try func.emitWValue(result);
@@ -4748,8 +4981,7 @@ fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
var lanes = std.mem.asBytes(operands[1..]);
for (0..@intCast(usize, mask_len)) |index| {
- var buf: Value.ElemValueBuffer = undefined;
- const mask_elem = mask.elemValueBuffer(module, index, &buf).toSignedInt(func.target);
+ const mask_elem = (try mask.elemValue(mod, index)).toSignedInt(mod);
const base_index = if (mask_elem >= 0)
@intCast(u8, @intCast(i64, elem_size) * mask_elem)
else
@@ -4780,22 +5012,26 @@ fn airReduce(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
- const result_ty = func.air.typeOfIndex(inst);
- const len = @intCast(usize, result_ty.arrayLen());
+ const result_ty = func.typeOfIndex(inst);
+ const len = @intCast(usize, result_ty.arrayLen(mod));
const elements = @ptrCast([]const Air.Inst.Ref, func.air.extra[ty_pl.payload..][0..len]);
const result: WValue = result_value: {
- switch (result_ty.zigTypeTag()) {
+ switch (result_ty.zigTypeTag(mod)) {
.Array => {
const result = try func.allocStack(result_ty);
- const elem_ty = result_ty.childType();
- const elem_size = @intCast(u32, elem_ty.abiSize(func.target));
+ const elem_ty = result_ty.childType(mod);
+ const elem_size = @intCast(u32, elem_ty.abiSize(mod));
+ const sentinel = if (result_ty.sentinel(mod)) |sent| blk: {
+ break :blk try func.lowerConstant(sent, elem_ty);
+ } else null;
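+ // For sentinel-terminated arrays the sentinel is written one element past
+ // the last initialized element.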
// When the element type is by reference, we must copy the entire
// value. It is therefore safer to move the offset pointer and store
// each value individually, instead of using store offsets.
- if (isByRef(elem_ty, func.target)) {
+ if (isByRef(elem_ty, mod)) {
// copy stack pointer into a temporary local, which is
// moved for each element to store each value in the right position.
const offset = try func.buildPointerOffset(result, 0, .new);
@@ -4803,10 +5039,13 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const elem_val = try func.resolveInst(elem);
try func.store(offset, elem_val, elem_ty, 0);
- if (elem_index < elements.len - 1) {
+ if (elem_index < elements.len - 1 or sentinel != null) {
_ = try func.buildPointerOffset(offset, elem_size, .modify);
}
}
+ if (sentinel) |sent| {
+ try func.store(offset, sent, elem_ty, 0);
+ }
} else {
var offset: u32 = 0;
for (elements) |elem| {
@@ -4814,36 +5053,42 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.store(result, elem_val, elem_ty, offset);
offset += elem_size;
}
+ if (sentinel) |sent| {
+ try func.store(result, sent, elem_ty, offset);
+ }
}
break :result_value result;
},
- .Struct => switch (result_ty.containerLayout()) {
+ .Struct => switch (result_ty.containerLayout(mod)) {
.Packed => {
- if (isByRef(result_ty, func.target)) {
+ if (isByRef(result_ty, mod)) {
return func.fail("TODO: airAggregateInit for packed structs larger than 64 bits", .{});
}
- const struct_obj = result_ty.castTag(.@"struct").?.data;
+ const struct_obj = mod.typeToStruct(result_ty).?;
const fields = struct_obj.fields.values();
const backing_type = struct_obj.backing_int_ty;
- // we ensure a new local is created so it's zero-initialized
- const result = try func.ensureAllocLocal(backing_type);
+
+ // ensure the result is zeroed
+ const result = try func.allocLocal(backing_type);
+ if (struct_obj.backing_int_ty.bitSize(mod) <= 32)
+ try func.addImm32(0)
+ else
+ try func.addImm64(0);
+ try func.addLabel(.local_set, result.local.value);
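+ // each field below is shifted into position and merged into this backing
+ // integer, so it must start out as all zeroes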
+
var current_bit: u16 = 0;
for (elements, 0..) |elem, elem_index| {
const field = fields[elem_index];
- if (!field.ty.hasRuntimeBitsIgnoreComptime()) continue;
+ if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
- const shift_val = if (struct_obj.backing_int_ty.bitSize(func.target) <= 32)
+ const shift_val = if (struct_obj.backing_int_ty.bitSize(mod) <= 32)
WValue{ .imm32 = current_bit }
else
WValue{ .imm64 = current_bit };
const value = try func.resolveInst(elem);
- const value_bit_size = @intCast(u16, field.ty.bitSize(func.target));
- var int_ty_payload: Type.Payload.Bits = .{
- .base = .{ .tag = .int_unsigned },
- .data = value_bit_size,
- };
- const int_ty = Type.initPayload(&int_ty_payload.base);
+ const value_bit_size = @intCast(u16, field.ty.bitSize(mod));
+ const int_ty = try mod.intType(.unsigned, value_bit_size);
// load our current result onto the stack so we can perform all transformations
// using only stack values, saving the cost of loads and stores.
@@ -4865,10 +5110,10 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const result = try func.allocStack(result_ty);
const offset = try func.buildPointerOffset(result, 0, .new); // pointer to offset
for (elements, 0..) |elem, elem_index| {
- if (result_ty.structFieldValueComptime(elem_index) != null) continue;
+ if ((try result_ty.structFieldValueComptime(mod, elem_index)) != null) continue;
- const elem_ty = result_ty.structFieldType(elem_index);
- const elem_size = @intCast(u32, elem_ty.abiSize(func.target));
+ const elem_ty = result_ty.structFieldType(elem_index, mod);
+ const elem_size = @intCast(u32, elem_ty.abiSize(mod));
const value = try func.resolveInst(elem);
try func.store(offset, value, elem_ty, 0);
@@ -4884,42 +5129,88 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
else => unreachable,
}
};
- // TODO: this is incorrect Liveness handling code
- func.finishAir(inst, result, &.{});
+
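+ // Liveness encodes at most `bpi - 1` operand deaths inline; aggregates with
+ // more operands use the big-tomb encoding instead.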
+ if (elements.len <= Liveness.bpi - 1) {
+ var buf = [1]Air.Inst.Ref{.none} ** (Liveness.bpi - 1);
+ @memcpy(buf[0..elements.len], elements);
+ return func.finishAir(inst, result, &buf);
+ }
+ var bt = try func.iterateBigTomb(inst, elements.len);
+ for (elements) |arg| bt.feed(arg);
+ return bt.finishAir(result);
}
fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
const extra = func.air.extraData(Air.UnionInit, ty_pl.payload).data;
const result = result: {
- const union_ty = func.air.typeOfIndex(inst);
- const layout = union_ty.unionGetLayout(func.target);
+ const union_ty = func.typeOfIndex(inst);
+ const layout = union_ty.unionGetLayout(mod);
+ const union_obj = mod.typeToUnion(union_ty).?;
+ const field = union_obj.fields.values()[extra.field_index];
+ const field_name = union_obj.fields.keys()[extra.field_index];
+
+ const tag_int = blk: {
+ const tag_ty = union_ty.unionTagTypeHypothetical(mod);
+ const enum_field_index = tag_ty.enumFieldIndex(field_name, mod).?;
+ const tag_val = try mod.enumValueFieldIndex(tag_ty, enum_field_index);
+ break :blk try func.lowerConstant(tag_val, tag_ty);
+ };
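+ // the integer value of the (possibly hypothetical) tag is used both as the
+ // result for zero-sized payloads and to initialize the tag field below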
if (layout.payload_size == 0) {
if (layout.tag_size == 0) {
break :result WValue{ .none = {} };
}
- assert(!isByRef(union_ty, func.target));
- break :result WValue{ .imm32 = extra.field_index };
+ assert(!isByRef(union_ty, mod));
+ break :result tag_int;
}
- assert(isByRef(union_ty, func.target));
- const result_ptr = try func.allocStack(union_ty);
- const payload = try func.resolveInst(extra.init);
- const union_obj = union_ty.cast(Type.Payload.Union).?.data;
- assert(union_obj.haveFieldTypes());
- const field = union_obj.fields.values()[extra.field_index];
+ if (isByRef(union_ty, mod)) {
+ const result_ptr = try func.allocStack(union_ty);
+ const payload = try func.resolveInst(extra.init);
+ if (layout.tag_align >= layout.payload_align) {
+ if (isByRef(field.ty, mod)) {
+ const payload_ptr = try func.buildPointerOffset(result_ptr, layout.tag_size, .new);
+ try func.store(payload_ptr, payload, field.ty, 0);
+ } else {
+ try func.store(result_ptr, payload, field.ty, @intCast(u32, layout.tag_size));
+ }
- if (layout.tag_align >= layout.payload_align) {
- const payload_ptr = try func.buildPointerOffset(result_ptr, layout.tag_size, .new);
- try func.store(payload_ptr, payload, field.ty, 0);
+ if (layout.tag_size > 0) {
+ try func.store(result_ptr, tag_int, union_obj.tag_ty, 0);
+ }
+ } else {
+ try func.store(result_ptr, payload, field.ty, 0);
+ if (layout.tag_size > 0) {
+ try func.store(
+ result_ptr,
+ tag_int,
+ union_obj.tag_ty,
+ @intCast(u32, layout.payload_size),
+ );
+ }
+ }
+ break :result result_ptr;
} else {
- try func.store(result_ptr, payload, field.ty, 0);
+ const operand = try func.resolveInst(extra.init);
+ const union_int_type = try mod.intType(.unsigned, @intCast(u16, union_ty.bitSize(mod)));
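+ // the union fits in a single wasm local, so the payload is converted into
+ // the union's unsigned integer representation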
+ if (field.ty.zigTypeTag(mod) == .Float) {
+ const int_type = try mod.intType(.unsigned, @intCast(u16, field.ty.bitSize(mod)));
+ const bitcasted = try func.bitcast(field.ty, int_type, operand);
+ const casted = try func.trunc(bitcasted, int_type, union_int_type);
+ break :result try casted.toLocal(func, field.ty);
+ } else if (field.ty.isPtrAtRuntime(mod)) {
+ const int_type = try mod.intType(.unsigned, @intCast(u16, field.ty.bitSize(mod)));
+ const casted = try func.intcast(operand, int_type, union_int_type);
+ break :result try casted.toLocal(func, field.ty);
+ }
+ const casted = try func.intcast(operand, field.ty, union_int_type);
+ break :result try casted.toLocal(func, field.ty);
}
- break :result result_ptr;
};
- func.finishAir(inst, result, &.{extra.init});
+ return func.finishAir(inst, result, &.{extra.init});
}
fn airPrefetch(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
@@ -4930,7 +5221,7 @@ fn airPrefetch(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airWasmMemorySize(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const pl_op = func.air.instructions.items(.data)[inst].pl_op;
- const result = try func.allocLocal(func.air.typeOfIndex(inst));
+ const result = try func.allocLocal(func.typeOfIndex(inst));
try func.addLabel(.memory_size, pl_op.payload);
try func.addLabel(.local_set, result.local.value);
func.finishAir(inst, result, &.{pl_op.operand});
@@ -4940,7 +5231,7 @@ fn airWasmMemoryGrow(func: *CodeGen, inst: Air.Inst.Index) !void {
const pl_op = func.air.instructions.items(.data)[inst].pl_op;
const operand = try func.resolveInst(pl_op.operand);
- const result = try func.allocLocal(func.air.typeOfIndex(inst));
+ const result = try func.allocLocal(func.typeOfIndex(inst));
try func.emitWValue(operand);
try func.addLabel(.memory_grow, pl_op.payload);
try func.addLabel(.local_set, result.local.value);
@@ -4948,14 +5239,14 @@ fn airWasmMemoryGrow(func: *CodeGen, inst: Air.Inst.Index) !void {
}
fn cmpOptionals(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std.math.CompareOperator) InnerError!WValue {
- assert(operand_ty.hasRuntimeBitsIgnoreComptime());
+ const mod = func.bin_file.base.options.module.?;
+ assert(operand_ty.hasRuntimeBitsIgnoreComptime(mod));
assert(op == .eq or op == .neq);
- var buf: Type.Payload.ElemType = undefined;
- const payload_ty = operand_ty.optionalChild(&buf);
+ const payload_ty = operand_ty.optionalChild(mod);
// We store the final result in here; it will be validated
// if the optionals are truly equal.
- var result = try func.ensureAllocLocal(Type.initTag(.i32));
+ var result = try func.ensureAllocLocal(Type.i32);
defer result.free(func);
try func.startBlock(.block, wasm.block_empty);
@@ -4966,7 +5257,7 @@ fn cmpOptionals(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op:
_ = try func.load(lhs, payload_ty, 0);
_ = try func.load(rhs, payload_ty, 0);
- const opcode = buildOpcode(.{ .op = .ne, .valtype1 = typeToValtype(payload_ty, func.target) });
+ const opcode = buildOpcode(.{ .op = .ne, .valtype1 = typeToValtype(payload_ty, mod) });
try func.addTag(Mir.Inst.Tag.fromOpcode(opcode));
try func.addLabel(.br_if, 0);
@@ -4984,10 +5275,11 @@ fn cmpOptionals(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op:
/// NOTE: Leaves the result of the comparison on top of the stack.
/// TODO: Lower this to compiler_rt call when bitsize > 128
fn cmpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std.math.CompareOperator) InnerError!WValue {
- assert(operand_ty.abiSize(func.target) >= 16);
+ const mod = func.bin_file.base.options.module.?;
+ assert(operand_ty.abiSize(mod) >= 16);
assert(!(lhs != .stack and rhs == .stack));
- if (operand_ty.intInfo(func.target).bits > 128) {
- return func.fail("TODO: Support cmpBigInt for integer bitsize: '{d}'", .{operand_ty.intInfo(func.target).bits});
+ if (operand_ty.bitSize(mod) > 128) {
+ return func.fail("TODO: Support cmpBigInt for integer bitsize: '{d}'", .{operand_ty.bitSize(mod)});
}
var lhs_high_bit = try (try func.load(lhs, Type.u64, 0)).toLocal(func, Type.u64);
@@ -5010,7 +5302,7 @@ fn cmpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std
}
},
else => {
- const ty = if (operand_ty.isSignedInt()) Type.i64 else Type.u64;
+ const ty = if (operand_ty.isSignedInt(mod)) Type.i64 else Type.u64;
// leave those values on top of the stack for '.select'
const lhs_low_bit = try func.load(lhs, Type.u64, 8);
const rhs_low_bit = try func.load(rhs, Type.u64, 8);
@@ -5025,10 +5317,11 @@ fn cmpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std
}
fn airSetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const bin_op = func.air.instructions.items(.data)[inst].bin_op;
- const un_ty = func.air.typeOf(bin_op.lhs).childType();
- const tag_ty = func.air.typeOf(bin_op.rhs);
- const layout = un_ty.unionGetLayout(func.target);
+ const un_ty = func.typeOf(bin_op.lhs).childType(mod);
+ const tag_ty = func.typeOf(bin_op.rhs);
+ const layout = un_ty.unionGetLayout(mod);
if (layout.tag_size == 0) return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
const union_ptr = try func.resolveInst(bin_op.lhs);
@@ -5048,11 +5341,12 @@ fn airSetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airGetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
- const un_ty = func.air.typeOf(ty_op.operand);
- const tag_ty = func.air.typeOfIndex(inst);
- const layout = un_ty.unionGetLayout(func.target);
+ const un_ty = func.typeOf(ty_op.operand);
+ const tag_ty = func.typeOfIndex(inst);
+ const layout = un_ty.unionGetLayout(mod);
if (layout.tag_size == 0) return func.finishAir(inst, .none, &.{ty_op.operand});
const operand = try func.resolveInst(ty_op.operand);
@@ -5069,9 +5363,9 @@ fn airGetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airFpext(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
- const dest_ty = func.air.typeOfIndex(inst);
+ const dest_ty = func.typeOfIndex(inst);
const operand = try func.resolveInst(ty_op.operand);
- const extended = try func.fpext(operand, func.air.typeOf(ty_op.operand), dest_ty);
+ const extended = try func.fpext(operand, func.typeOf(ty_op.operand), dest_ty);
const result = try extended.toLocal(func, dest_ty);
func.finishAir(inst, result, &.{ty_op.operand});
}
@@ -5090,7 +5384,7 @@ fn fpext(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError!
// call __extendhfsf2(f16) f32
const f32_result = try func.callIntrinsic(
"__extendhfsf2",
- &.{Type.f16},
+ &.{.f16_type},
Type.f32,
&.{operand},
);
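// wasm has no native f16 operations, so the extension goes through compiler-rt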
@@ -5108,15 +5402,15 @@ fn fpext(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError!
target_util.compilerRtFloatAbbrev(wanted_bits),
}) catch unreachable;
- return func.callIntrinsic(fn_name, &.{given}, wanted, &.{operand});
+ return func.callIntrinsic(fn_name, &.{given.ip_index}, wanted, &.{operand});
}
fn airFptrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
- const dest_ty = func.air.typeOfIndex(inst);
+ const dest_ty = func.typeOfIndex(inst);
const operand = try func.resolveInst(ty_op.operand);
- const truncated = try func.fptrunc(operand, func.air.typeOf(ty_op.operand), dest_ty);
+ const truncated = try func.fptrunc(operand, func.typeOf(ty_op.operand), dest_ty);
const result = try truncated.toLocal(func, dest_ty);
func.finishAir(inst, result, &.{ty_op.operand});
}
@@ -5139,7 +5433,7 @@ fn fptrunc(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro
} else operand;
// call __truncsfhf2(f32) f16
- return func.callIntrinsic("__truncsfhf2", &.{Type.f32}, Type.f16, &.{op});
+ return func.callIntrinsic("__truncsfhf2", &.{.f32_type}, Type.f16, &.{op});
}
var fn_name_buf: [12]u8 = undefined;
@@ -5148,14 +5442,15 @@ fn fptrunc(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro
target_util.compilerRtFloatAbbrev(wanted_bits),
}) catch unreachable;
- return func.callIntrinsic(fn_name, &.{given}, wanted, &.{operand});
+ return func.callIntrinsic(fn_name, &.{given.ip_index}, wanted, &.{operand});
}
fn airErrUnionPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
- const err_set_ty = func.air.typeOf(ty_op.operand).childType();
- const payload_ty = err_set_ty.errorUnionPayload();
+ const err_set_ty = func.typeOf(ty_op.operand).childType(mod);
+ const payload_ty = err_set_ty.errorUnionPayload(mod);
const operand = try func.resolveInst(ty_op.operand);
// set the error tag to '0' to indicate the error union holds no error
@@ -5163,26 +5458,27 @@ fn airErrUnionPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!voi
operand,
.{ .imm32 = 0 },
Type.anyerror,
- @intCast(u32, errUnionErrorOffset(payload_ty, func.target)),
+ @intCast(u32, errUnionErrorOffset(payload_ty, mod)),
);
const result = result: {
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
break :result func.reuseOperand(ty_op.operand, operand);
}
- break :result try func.buildPointerOffset(operand, @intCast(u32, errUnionPayloadOffset(payload_ty, func.target)), .new);
+ break :result try func.buildPointerOffset(operand, @intCast(u32, errUnionPayloadOffset(payload_ty, mod)), .new);
};
func.finishAir(inst, result, &.{ty_op.operand});
}
fn airFieldParentPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
const extra = func.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
const field_ptr = try func.resolveInst(extra.field_ptr);
- const parent_ty = func.air.getRefType(ty_pl.ty).childType();
- const field_offset = parent_ty.structFieldOffset(extra.field_index, func.target);
+ const parent_ty = func.air.getRefType(ty_pl.ty).childType(mod);
+ const field_offset = parent_ty.structFieldOffset(extra.field_index, mod);
const result = if (field_offset != 0) result: {
const base = try func.buildPointerOffset(field_ptr, 0, .new);
@@ -5197,7 +5493,8 @@ fn airFieldParentPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn sliceOrArrayPtr(func: *CodeGen, ptr: WValue, ptr_ty: Type) InnerError!WValue {
- if (ptr_ty.isSlice()) {
+ const mod = func.bin_file.base.options.module.?;
+ if (ptr_ty.isSlice(mod)) {
return func.slicePtr(ptr);
} else {
return ptr;
@@ -5205,14 +5502,27 @@ fn sliceOrArrayPtr(func: *CodeGen, ptr: WValue, ptr_ty: Type) InnerError!WValue
}
fn airMemcpy(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const bin_op = func.air.instructions.items(.data)[inst].bin_op;
const dst = try func.resolveInst(bin_op.lhs);
- const dst_ty = func.air.typeOf(bin_op.lhs);
+ const dst_ty = func.typeOf(bin_op.lhs);
+ const ptr_elem_ty = dst_ty.childType(mod);
const src = try func.resolveInst(bin_op.rhs);
- const src_ty = func.air.typeOf(bin_op.rhs);
- const len = switch (dst_ty.ptrSize()) {
- .Slice => try func.sliceLen(dst),
- .One => @as(WValue, .{ .imm64 = dst_ty.childType().arrayLen() }),
+ const src_ty = func.typeOf(bin_op.rhs);
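+ // memcpy takes a byte count, so slice lengths (element counts) are scaled
+ // by the element's ABI size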
+ const len = switch (dst_ty.ptrSize(mod)) {
+ .Slice => blk: {
+ const slice_len = try func.sliceLen(dst);
+ if (ptr_elem_ty.abiSize(mod) != 1) {
+ try func.emitWValue(slice_len);
+ try func.emitWValue(.{ .imm32 = @intCast(u32, ptr_elem_ty.abiSize(mod)) });
+ try func.addTag(.i32_mul);
+ try func.addLabel(.local_set, slice_len.local.value);
+ }
+ break :blk slice_len;
+ },
+ .One => @as(WValue, .{
+ .imm32 = @intCast(u32, ptr_elem_ty.arrayLen(mod) * ptr_elem_ty.childType(mod).abiSize(mod)),
+ }),
.C, .Many => unreachable,
};
const dst_ptr = try func.sliceOrArrayPtr(dst, dst_ty);
@@ -5232,17 +5542,18 @@ fn airRetAddr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airPopcount(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
const operand = try func.resolveInst(ty_op.operand);
- const op_ty = func.air.typeOf(ty_op.operand);
- const result_ty = func.air.typeOfIndex(inst);
+ const op_ty = func.typeOf(ty_op.operand);
+ const result_ty = func.typeOfIndex(inst);
- if (op_ty.zigTypeTag() == .Vector) {
+ if (op_ty.zigTypeTag(mod) == .Vector) {
return func.fail("TODO: Implement @popCount for vectors", .{});
}
- const int_info = op_ty.intInfo(func.target);
+ const int_info = op_ty.intInfo(mod);
const bits = int_info.bits;
const wasm_bits = toWasmBits(bits) orelse {
return func.fail("TODO: Implement @popCount for integers with bitsize '{d}'", .{bits});
@@ -5291,8 +5602,9 @@ fn airErrorName(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// As the names are global and the slice elements are constant, we do not have
// to make a copy of the ptr+value but can point towards them directly.
const error_table_symbol = try func.bin_file.getErrorTableSymbol();
- const name_ty = Type.initTag(.const_slice_u8_sentinel_0);
- const abi_size = name_ty.abiSize(func.target);
+ const name_ty = Type.slice_const_u8_sentinel_0;
+ const mod = func.bin_file.base.options.module.?;
+ const abi_size = name_ty.abiSize(mod);
const error_name_value: WValue = .{ .memory = error_table_symbol }; // emitting this will create a relocation
try func.emitWValue(error_name_value);
@@ -5330,20 +5642,21 @@ fn airAddSubWithOverflow(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerErro
const lhs_op = try func.resolveInst(extra.lhs);
const rhs_op = try func.resolveInst(extra.rhs);
- const lhs_ty = func.air.typeOf(extra.lhs);
+ const lhs_ty = func.typeOf(extra.lhs);
+ const mod = func.bin_file.base.options.module.?;
- if (lhs_ty.zigTypeTag() == .Vector) {
+ if (lhs_ty.zigTypeTag(mod) == .Vector) {
return func.fail("TODO: Implement overflow arithmetic for vectors", .{});
}
- const int_info = lhs_ty.intInfo(func.target);
+ const int_info = lhs_ty.intInfo(mod);
const is_signed = int_info.signedness == .signed;
const wasm_bits = toWasmBits(int_info.bits) orelse {
return func.fail("TODO: Implement {{add/sub}}_with_overflow for integer bitsize: {d}", .{int_info.bits});
};
if (wasm_bits == 128) {
- const result = try func.addSubWithOverflowBigInt(lhs_op, rhs_op, lhs_ty, func.air.typeOfIndex(inst), op);
+ const result = try func.addSubWithOverflowBigInt(lhs_op, rhs_op, lhs_ty, func.typeOfIndex(inst), op);
return func.finishAir(inst, result, &.{ extra.lhs, extra.rhs });
}
@@ -5372,11 +5685,10 @@ fn airAddSubWithOverflow(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerErro
};
var bin_op = try (try func.binOp(lhs, rhs, lhs_ty, op)).toLocal(func, lhs_ty);
- defer bin_op.free(func);
var result = if (wasm_bits != int_info.bits) blk: {
break :blk try (try func.wrapOperand(bin_op, lhs_ty)).toLocal(func, lhs_ty);
} else bin_op;
- defer result.free(func); // no-op when wasm_bits == int_info.bits
+ defer result.free(func);
const cmp_op: std.math.CompareOperator = if (op == .sub) .gt else .lt;
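// for unsigned integers, `a + b` overflowed iff `result < a`, and `a - b` iff `result > a`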
const overflow_bit: WValue = if (is_signed) blk: {
@@ -5394,17 +5706,18 @@ fn airAddSubWithOverflow(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerErro
var overflow_local = try overflow_bit.toLocal(func, Type.u32);
defer overflow_local.free(func);
- const result_ptr = try func.allocStack(func.air.typeOfIndex(inst));
+ const result_ptr = try func.allocStack(func.typeOfIndex(inst));
try func.store(result_ptr, result, lhs_ty, 0);
- const offset = @intCast(u32, lhs_ty.abiSize(func.target));
- try func.store(result_ptr, overflow_local, Type.initTag(.u1), offset);
+ const offset = @intCast(u32, lhs_ty.abiSize(mod));
+ try func.store(result_ptr, overflow_local, Type.u1, offset);
func.finishAir(inst, result_ptr, &.{ extra.lhs, extra.rhs });
}
fn addSubWithOverflowBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, result_ty: Type, op: Op) InnerError!WValue {
+ const mod = func.bin_file.base.options.module.?;
assert(op == .add or op == .sub);
- const int_info = ty.intInfo(func.target);
+ const int_info = ty.intInfo(mod);
const is_signed = int_info.signedness == .signed;
if (int_info.bits != 128) {
return func.fail("TODO: Implement @{{add/sub}}WithOverflow for integer bitsize '{d}'", .{int_info.bits});
@@ -5455,36 +5768,46 @@ fn addSubWithOverflowBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type,
break :blk WValue{ .stack = {} };
};
- var overflow_local = try overflow_bit.toLocal(func, Type.initTag(.u1));
+ var overflow_local = try overflow_bit.toLocal(func, Type.u1);
defer overflow_local.free(func);
const result_ptr = try func.allocStack(result_ty);
try func.store(result_ptr, high_op_res, Type.u64, 0);
try func.store(result_ptr, tmp_op, Type.u64, 8);
- try func.store(result_ptr, overflow_local, Type.initTag(.u1), 16);
+ try func.store(result_ptr, overflow_local, Type.u1, 16);
return result_ptr;
}
fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
const extra = func.air.extraData(Air.Bin, ty_pl.payload).data;
const lhs = try func.resolveInst(extra.lhs);
const rhs = try func.resolveInst(extra.rhs);
- const lhs_ty = func.air.typeOf(extra.lhs);
+ const lhs_ty = func.typeOf(extra.lhs);
+ const rhs_ty = func.typeOf(extra.rhs);
- if (lhs_ty.zigTypeTag() == .Vector) {
+ if (lhs_ty.zigTypeTag(mod) == .Vector) {
return func.fail("TODO: Implement overflow arithmetic for vectors", .{});
}
- const int_info = lhs_ty.intInfo(func.target);
+ const int_info = lhs_ty.intInfo(mod);
const is_signed = int_info.signedness == .signed;
const wasm_bits = toWasmBits(int_info.bits) orelse {
return func.fail("TODO: Implement shl_with_overflow for integer bitsize: {d}", .{int_info.bits});
};
- var shl = try (try func.binOp(lhs, rhs, lhs_ty, .shl)).toLocal(func, lhs_ty);
+ // Ensure rhs is coerced to the type of lhs, as both operands must have
+ // the same WebAssembly type before we can perform any binary operation.
+ const rhs_wasm_bits = toWasmBits(rhs_ty.intInfo(mod).bits).?;
+ const rhs_final = if (wasm_bits != rhs_wasm_bits) blk: {
+ const rhs_casted = try func.intcast(rhs, rhs_ty, lhs_ty);
+ break :blk try rhs_casted.toLocal(func, lhs_ty);
+ } else rhs;
+
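+ // For example, Zig's `u64 << u6` has no direct wasm equivalent: `i64.shl`
+ // takes two i64 operands, so the narrower shift amount is widened to the
+ // type of lhs first.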
+ var shl = try (try func.binOp(lhs, rhs_final, lhs_ty, .shl)).toLocal(func, lhs_ty);
defer shl.free(func);
var result = if (wasm_bits != int_info.bits) blk: {
break :blk try (try func.wrapOperand(shl, lhs_ty)).toLocal(func, lhs_ty);
@@ -5495,20 +5818,20 @@ fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// emit lhs to the stack so we can keep 'wrapped' on the stack as well
try func.emitWValue(lhs);
const abs = try func.signAbsValue(shl, lhs_ty);
- const wrapped = try func.wrapBinOp(abs, rhs, lhs_ty, .shr);
+ const wrapped = try func.wrapBinOp(abs, rhs_final, lhs_ty, .shr);
break :blk try func.cmp(.{ .stack = {} }, wrapped, lhs_ty, .neq);
} else blk: {
try func.emitWValue(lhs);
- const shr = try func.binOp(result, rhs, lhs_ty, .shr);
+ const shr = try func.binOp(result, rhs_final, lhs_ty, .shr);
break :blk try func.cmp(.{ .stack = {} }, shr, lhs_ty, .neq);
};
- var overflow_local = try overflow_bit.toLocal(func, Type.initTag(.u1));
+ var overflow_local = try overflow_bit.toLocal(func, Type.u1);
defer overflow_local.free(func);
- const result_ptr = try func.allocStack(func.air.typeOfIndex(inst));
+ const result_ptr = try func.allocStack(func.typeOfIndex(inst));
try func.store(result_ptr, result, lhs_ty, 0);
- const offset = @intCast(u32, lhs_ty.abiSize(func.target));
- try func.store(result_ptr, overflow_local, Type.initTag(.u1), offset);
+ const offset = @intCast(u32, lhs_ty.abiSize(mod));
+ try func.store(result_ptr, overflow_local, Type.u1, offset);
func.finishAir(inst, result_ptr, &.{ extra.lhs, extra.rhs });
}
@@ -5519,29 +5842,26 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const lhs = try func.resolveInst(extra.lhs);
const rhs = try func.resolveInst(extra.rhs);
- const lhs_ty = func.air.typeOf(extra.lhs);
+ const lhs_ty = func.typeOf(extra.lhs);
+ const mod = func.bin_file.base.options.module.?;
- if (lhs_ty.zigTypeTag() == .Vector) {
+ if (lhs_ty.zigTypeTag(mod) == .Vector) {
return func.fail("TODO: Implement overflow arithmetic for vectors", .{});
}
// Stores whether an overflow (or underflow) occurred. As it's
// zero-initialized, we only need to update it when one happens.
- var overflow_bit = try func.ensureAllocLocal(Type.initTag(.u1));
+ var overflow_bit = try func.ensureAllocLocal(Type.u1);
defer overflow_bit.free(func);
- const int_info = lhs_ty.intInfo(func.target);
+ const int_info = lhs_ty.intInfo(mod);
const wasm_bits = toWasmBits(int_info.bits) orelse {
- return func.fail("TODO: Implement overflow arithmetic for integer bitsize: {d}", .{int_info.bits});
- };
-
- if (wasm_bits > 32) {
return func.fail("TODO: Implement `@mulWithOverflow` for integer bitsize: {d}", .{int_info.bits});
- }
+ };
const zero = switch (wasm_bits) {
32 => WValue{ .imm32 = 0 },
- 64 => WValue{ .imm64 = 0 },
+ 64, 128 => WValue{ .imm64 = 0 },
else => unreachable,
};
@@ -5568,7 +5888,7 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.addLabel(.local_set, overflow_bit.local.value);
break :blk down_cast;
}
- } else if (int_info.signedness == .signed) blk: {
+ } else if (int_info.signedness == .signed and wasm_bits == 32) blk: {
const lhs_abs = try func.signAbsValue(lhs, lhs_ty);
const rhs_abs = try func.signAbsValue(rhs, lhs_ty);
const bin_op = try (try func.binOp(lhs_abs, rhs_abs, lhs_ty, .mul)).toLocal(func, lhs_ty);
@@ -5576,7 +5896,7 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
_ = try func.cmp(mul_abs, bin_op, lhs_ty, .neq);
try func.addLabel(.local_set, overflow_bit.local.value);
break :blk try func.wrapOperand(bin_op, lhs_ty);
- } else blk: {
+ } else if (wasm_bits == 32) blk: {
var bin_op = try (try func.binOp(lhs, rhs, lhs_ty, .mul)).toLocal(func, lhs_ty);
defer bin_op.free(func);
const shift_imm = if (wasm_bits == 32)
@@ -5587,27 +5907,120 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
_ = try func.cmp(shr, zero, lhs_ty, .neq);
try func.addLabel(.local_set, overflow_bit.local.value);
break :blk try func.wrapOperand(bin_op, lhs_ty);
- };
+ } else if (int_info.bits == 64 and int_info.signedness == .unsigned) blk: {
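+ // Unsigned 64-bit multiply: widen both operands to u128, multiply, and
+ // flag overflow when the product's upper half (the u64 loaded at
+ // offset 8 here) is nonzero.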
+ const new_ty = Type.u128;
+ var lhs_upcast = try (try func.intcast(lhs, lhs_ty, new_ty)).toLocal(func, lhs_ty);
+ defer lhs_upcast.free(func);
+ var rhs_upcast = try (try func.intcast(rhs, lhs_ty, new_ty)).toLocal(func, lhs_ty);
+ defer rhs_upcast.free(func);
+ const bin_op = try func.binOp(lhs_upcast, rhs_upcast, new_ty, .mul);
+ const lsb = try func.load(bin_op, lhs_ty, 8);
+ _ = try func.cmp(lsb, zero, lhs_ty, .neq);
+ try func.addLabel(.local_set, overflow_bit.local.value);
+
+ break :blk try func.load(bin_op, lhs_ty, 0);
+ } else if (int_info.bits == 64 and int_info.signedness == .signed) blk: {
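+ // Signed 64-bit multiply: `__multi3` computes the full 128-bit product
+ // of the sign-extended operands (the 63-bit arithmetic shifts produce
+ // each operand's sign word). Overflow occurred iff the product's high
+ // half differs from the sign-extension of its low half.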
+ const shift_val: WValue = .{ .imm64 = 63 };
+ var lhs_shifted = try (try func.binOp(lhs, shift_val, lhs_ty, .shr)).toLocal(func, lhs_ty);
+ defer lhs_shifted.free(func);
+ var rhs_shifted = try (try func.binOp(rhs, shift_val, lhs_ty, .shr)).toLocal(func, lhs_ty);
+ defer rhs_shifted.free(func);
+
+ const bin_op = try func.callIntrinsic(
+ "__multi3",
+ &[_]InternPool.Index{.i64_type} ** 4,
+ Type.i128,
+ &.{ lhs, lhs_shifted, rhs, rhs_shifted },
+ );
+ const res = try func.allocLocal(lhs_ty);
+ const msb = try func.load(bin_op, lhs_ty, 0);
+ try func.addLabel(.local_tee, res.local.value);
+ const msb_shifted = try func.binOp(msb, shift_val, lhs_ty, .shr);
+ const lsb = try func.load(bin_op, lhs_ty, 8);
+ _ = try func.cmp(lsb, msb_shifted, lhs_ty, .neq);
+ try func.addLabel(.local_set, overflow_bit.local.value);
+ break :blk res;
+ } else if (int_info.bits == 128 and int_info.signedness == .unsigned) blk: {
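+ // Unsigned 128-bit multiply via the schoolbook split x = x0 + x1*2^64,
+ // y = y0 + y1*2^64: the truncated product is x0*y0 + (x0*y1 + x1*y0)*2^64,
+ // and overflow occurs when x1*y1 != 0, when either cross product exceeds
+ // 64 bits, or when summing the cross terms into the high word carries out.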
+ var lhs_msb = try (try func.load(lhs, Type.u64, 0)).toLocal(func, Type.u64);
+ defer lhs_msb.free(func);
+ var lhs_lsb = try (try func.load(lhs, Type.u64, 8)).toLocal(func, Type.u64);
+ defer lhs_lsb.free(func);
+ var rhs_msb = try (try func.load(rhs, Type.u64, 0)).toLocal(func, Type.u64);
+ defer rhs_msb.free(func);
+ var rhs_lsb = try (try func.load(rhs, Type.u64, 8)).toLocal(func, Type.u64);
+ defer rhs_lsb.free(func);
+
+ const mul1 = try func.callIntrinsic(
+ "__multi3",
+ &[_]InternPool.Index{.i64_type} ** 4,
+ Type.i128,
+ &.{ lhs_lsb, zero, rhs_msb, zero },
+ );
+ const mul2 = try func.callIntrinsic(
+ "__multi3",
+ &[_]InternPool.Index{.i64_type} ** 4,
+ Type.i128,
+ &.{ rhs_lsb, zero, lhs_msb, zero },
+ );
+ const mul3 = try func.callIntrinsic(
+ "__multi3",
+ &[_]InternPool.Index{.i64_type} ** 4,
+ Type.i128,
+ &.{ lhs_msb, zero, rhs_msb, zero },
+ );
+
+ const rhs_lsb_not_zero = try func.cmp(rhs_lsb, zero, Type.u64, .neq);
+ const lhs_lsb_not_zero = try func.cmp(lhs_lsb, zero, Type.u64, .neq);
+ const lsb_and = try func.binOp(rhs_lsb_not_zero, lhs_lsb_not_zero, Type.bool, .@"and");
+ const mul1_lsb = try func.load(mul1, Type.u64, 8);
+ const mul1_lsb_not_zero = try func.cmp(mul1_lsb, zero, Type.u64, .neq);
+ const lsb_or1 = try func.binOp(lsb_and, mul1_lsb_not_zero, Type.bool, .@"or");
+ const mul2_lsb = try func.load(mul2, Type.u64, 8);
+ const mul2_lsb_not_zero = try func.cmp(mul2_lsb, zero, Type.u64, .neq);
+ const lsb_or = try func.binOp(lsb_or1, mul2_lsb_not_zero, Type.bool, .@"or");
+
+ const mul1_msb = try func.load(mul1, Type.u64, 0);
+ const mul2_msb = try func.load(mul2, Type.u64, 0);
+ const mul_add1 = try func.binOp(mul1_msb, mul2_msb, Type.u64, .add);
+
+ var mul3_lsb = try (try func.load(mul3, Type.u64, 8)).toLocal(func, Type.u64);
+ defer mul3_lsb.free(func);
+ var mul_add2 = try (try func.binOp(mul_add1, mul3_lsb, Type.u64, .add)).toLocal(func, Type.u64);
+ defer mul_add2.free(func);
+ const mul_add_lt = try func.cmp(mul_add2, mul3_lsb, Type.u64, .lt);
+
+ // combine the partial checks into the final overflow bit
+ _ = try func.binOp(lsb_or, mul_add_lt, Type.bool, .@"or");
+ try func.addLabel(.local_set, overflow_bit.local.value);
+
+ const tmp_result = try func.allocStack(Type.u128);
+ try func.emitWValue(tmp_result);
+ const mul3_msb = try func.load(mul3, Type.u64, 0);
+ try func.store(.stack, mul3_msb, Type.u64, tmp_result.offset());
+ try func.store(tmp_result, mul_add2, Type.u64, 8);
+ break :blk tmp_result;
+ } else return func.fail("TODO: @mulWithOverflow for integers between 32 and 64 bits", .{});
var bin_op_local = try bin_op.toLocal(func, lhs_ty);
defer bin_op_local.free(func);
- const result_ptr = try func.allocStack(func.air.typeOfIndex(inst));
+ const result_ptr = try func.allocStack(func.typeOfIndex(inst));
try func.store(result_ptr, bin_op_local, lhs_ty, 0);
- const offset = @intCast(u32, lhs_ty.abiSize(func.target));
- try func.store(result_ptr, overflow_bit, Type.initTag(.u1), offset);
+ const offset = @intCast(u32, lhs_ty.abiSize(mod));
+ try func.store(result_ptr, overflow_bit, Type.u1, offset);
func.finishAir(inst, result_ptr, &.{ extra.lhs, extra.rhs });
}
fn airMaxMin(func: *CodeGen, inst: Air.Inst.Index, op: enum { max, min }) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const bin_op = func.air.instructions.items(.data)[inst].bin_op;
- const ty = func.air.typeOfIndex(inst);
- if (ty.zigTypeTag() == .Vector) {
+ const ty = func.typeOfIndex(inst);
+ if (ty.zigTypeTag(mod) == .Vector) {
return func.fail("TODO: `@maximum` and `@minimum` for vectors", .{});
}
- if (ty.abiSize(func.target) > 16) {
+ if (ty.abiSize(mod) > 16) {
return func.fail("TODO: `@maximum` and `@minimum` for types larger than 16 bytes", .{});
}
@@ -5623,18 +6036,19 @@ fn airMaxMin(func: *CodeGen, inst: Air.Inst.Index, op: enum { max, min }) InnerE
try func.addTag(.select);
// store result in local
- const result_ty = if (isByRef(ty, func.target)) Type.u32 else ty;
+ const result_ty = if (isByRef(ty, mod)) Type.u32 else ty;
const result = try func.allocLocal(result_ty);
try func.addLabel(.local_set, result.local.value);
func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
}
fn airMulAdd(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const pl_op = func.air.instructions.items(.data)[inst].pl_op;
const bin_op = func.air.extraData(Air.Bin, pl_op.payload).data;
- const ty = func.air.typeOfIndex(inst);
- if (ty.zigTypeTag() == .Vector) {
+ const ty = func.typeOfIndex(inst);
+ if (ty.zigTypeTag(mod) == .Vector) {
return func.fail("TODO: `@mulAdd` for vectors", .{});
}
@@ -5649,7 +6063,7 @@ fn airMulAdd(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// call to compiler-rt `fn fmaf(f32, f32, f32) f32`
var result = try func.callIntrinsic(
"fmaf",
- &.{ Type.f32, Type.f32, Type.f32 },
+ &.{ .f32_type, .f32_type, .f32_type },
Type.f32,
&.{ rhs_ext, lhs_ext, addend_ext },
);
@@ -5663,16 +6077,17 @@ fn airMulAdd(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airClz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
- const ty = func.air.typeOf(ty_op.operand);
- const result_ty = func.air.typeOfIndex(inst);
- if (ty.zigTypeTag() == .Vector) {
+ const ty = func.typeOf(ty_op.operand);
+ const result_ty = func.typeOfIndex(inst);
+ if (ty.zigTypeTag(mod) == .Vector) {
return func.fail("TODO: `@clz` for vectors", .{});
}
const operand = try func.resolveInst(ty_op.operand);
- const int_info = ty.intInfo(func.target);
+ const int_info = ty.intInfo(mod);
const wasm_bits = toWasmBits(int_info.bits) orelse {
return func.fail("TODO: `@clz` for integers with bitsize '{d}'", .{int_info.bits});
};
@@ -5715,17 +6130,18 @@ fn airClz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airCtz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
- const ty = func.air.typeOf(ty_op.operand);
- const result_ty = func.air.typeOfIndex(inst);
+ const ty = func.typeOf(ty_op.operand);
+ const result_ty = func.typeOfIndex(inst);
- if (ty.zigTypeTag() == .Vector) {
+ if (ty.zigTypeTag(mod) == .Vector) {
return func.fail("TODO: `@ctz` for vectors", .{});
}
const operand = try func.resolveInst(ty_op.operand);
- const int_info = ty.intInfo(func.target);
+ const int_info = ty.intInfo(mod);
const wasm_bits = toWasmBits(int_info.bits) orelse {
return func.fail("TODO: `@clz` for integers with bitsize '{d}'", .{int_info.bits});
};
@@ -5782,7 +6198,7 @@ fn airDbgVar(func: *CodeGen, inst: Air.Inst.Index, is_ptr: bool) !void {
if (func.debug_output != .dwarf) return func.finishAir(inst, .none, &.{});
const pl_op = func.air.instructions.items(.data)[inst].pl_op;
- const ty = func.air.typeOf(pl_op.operand);
+ const ty = func.typeOf(pl_op.operand);
const operand = try func.resolveInst(pl_op.operand);
log.debug("airDbgVar: %{d}: {}, {}", .{ inst, ty.fmtDebug(), operand });
@@ -5820,50 +6236,61 @@ fn airTry(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const err_union = try func.resolveInst(pl_op.operand);
const extra = func.air.extraData(Air.Try, pl_op.payload);
const body = func.air.extra[extra.end..][0..extra.data.body_len];
- const err_union_ty = func.air.typeOf(pl_op.operand);
- const result = try lowerTry(func, err_union, body, err_union_ty, false);
+ const err_union_ty = func.typeOf(pl_op.operand);
+ const result = try lowerTry(func, inst, err_union, body, err_union_ty, false);
func.finishAir(inst, result, &.{pl_op.operand});
}
fn airTryPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
const extra = func.air.extraData(Air.TryPtr, ty_pl.payload);
const err_union_ptr = try func.resolveInst(extra.data.ptr);
const body = func.air.extra[extra.end..][0..extra.data.body_len];
- const err_union_ty = func.air.typeOf(extra.data.ptr).childType();
- const result = try lowerTry(func, err_union_ptr, body, err_union_ty, true);
+ const err_union_ty = func.typeOf(extra.data.ptr).childType(mod);
+ const result = try lowerTry(func, inst, err_union_ptr, body, err_union_ty, true);
func.finishAir(inst, result, &.{extra.data.ptr});
}
fn lowerTry(
func: *CodeGen,
+ inst: Air.Inst.Index,
err_union: WValue,
body: []const Air.Inst.Index,
err_union_ty: Type,
operand_is_ptr: bool,
) InnerError!WValue {
+ const mod = func.bin_file.base.options.module.?;
if (operand_is_ptr) {
return func.fail("TODO: lowerTry for pointers", .{});
}
- const pl_ty = err_union_ty.errorUnionPayload();
- const pl_has_bits = pl_ty.hasRuntimeBitsIgnoreComptime();
+ const pl_ty = err_union_ty.errorUnionPayload(mod);
+ const pl_has_bits = pl_ty.hasRuntimeBitsIgnoreComptime(mod);
- if (!err_union_ty.errorUnionSet().errorSetIsEmpty()) {
+ if (!err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) {
// Block we can jump out of when error is not set
try func.startBlock(.block, wasm.block_empty);
// check if the error tag is set for the error union.
try func.emitWValue(err_union);
if (pl_has_bits) {
- const err_offset = @intCast(u32, errUnionErrorOffset(pl_ty, func.target));
+ const err_offset = @intCast(u32, errUnionErrorOffset(pl_ty, mod));
try func.addMemArg(.i32_load16_u, .{
.offset = err_union.offset() + err_offset,
- .alignment = Type.anyerror.abiAlignment(func.target),
+ .alignment = Type.anyerror.abiAlignment(mod),
});
}
try func.addTag(.i32_eqz);
try func.addLabel(.br_if, 0); // jump out of block when error is '0'
+
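+ // The error body gets its own branch scope so that operand deaths
+ // processed while generating it are discarded again when the branch is
+ // popped, instead of leaking into the fallthrough (payload) path.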
+ const liveness = func.liveness.getCondBr(inst);
+ try func.branches.append(func.gpa, .{});
+ try func.currentBranch().values.ensureUnusedCapacity(func.gpa, liveness.else_deaths.len + liveness.then_deaths.len);
+ defer {
+ var branch = func.branches.pop();
+ branch.deinit(func.gpa);
+ }
try func.genBody(body);
try func.endBlock();
}
@@ -5873,8 +6300,8 @@ fn lowerTry(
return WValue{ .none = {} };
}
- const pl_offset = @intCast(u32, errUnionPayloadOffset(pl_ty, func.target));
- if (isByRef(pl_ty, func.target)) {
+ const pl_offset = @intCast(u32, errUnionPayloadOffset(pl_ty, mod));
+ if (isByRef(pl_ty, mod)) {
return buildPointerOffset(func, err_union, pl_offset, .new);
}
const payload = try func.load(err_union, pl_ty, pl_offset);
@@ -5882,15 +6309,16 @@ fn lowerTry(
}
fn airByteSwap(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
- const ty = func.air.typeOfIndex(inst);
+ const ty = func.typeOfIndex(inst);
const operand = try func.resolveInst(ty_op.operand);
- if (ty.zigTypeTag() == .Vector) {
+ if (ty.zigTypeTag(mod) == .Vector) {
return func.fail("TODO: @byteSwap for vectors", .{});
}
- const int_info = ty.intInfo(func.target);
+ const int_info = ty.intInfo(mod);
// swapping a single byte is a no-op
if (int_info.bits == 8) {
@@ -5952,31 +6380,54 @@ fn airByteSwap(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airDiv(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const bin_op = func.air.instructions.items(.data)[inst].bin_op;
- const ty = func.air.typeOfIndex(inst);
+ const ty = func.typeOfIndex(inst);
const lhs = try func.resolveInst(bin_op.lhs);
const rhs = try func.resolveInst(bin_op.rhs);
- const result = if (ty.isSignedInt())
+ const result = if (ty.isSignedInt(mod))
try func.divSigned(lhs, rhs, ty)
else
try (try func.binOp(lhs, rhs, ty, .div)).toLocal(func, ty);
func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
}
+fn airDivTrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
+ const bin_op = func.air.instructions.items(.data)[inst].bin_op;
+
+ const ty = func.typeOfIndex(inst);
+ const lhs = try func.resolveInst(bin_op.lhs);
+ const rhs = try func.resolveInst(bin_op.rhs);
+
+ const div_result = if (ty.isSignedInt(mod))
+ try func.divSigned(lhs, rhs, ty)
+ else
+ try (try func.binOp(lhs, rhs, ty, .div)).toLocal(func, ty);
+
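+ // @divTrunc rounds toward zero: wasm's signed integer division already
+ // truncates, while float division needs an explicit `trunc` afterwards.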
+ if (ty.isAnyFloat()) {
+ const trunc_result = try (try func.floatOp(.trunc, ty, &.{div_result})).toLocal(func, ty);
+ return func.finishAir(inst, trunc_result, &.{ bin_op.lhs, bin_op.rhs });
+ }
+
+ return func.finishAir(inst, div_result, &.{ bin_op.lhs, bin_op.rhs });
+}
+
fn airDivFloor(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const bin_op = func.air.instructions.items(.data)[inst].bin_op;
- const ty = func.air.typeOfIndex(inst);
+ const mod = func.bin_file.base.options.module.?;
+ const ty = func.typeOfIndex(inst);
const lhs = try func.resolveInst(bin_op.lhs);
const rhs = try func.resolveInst(bin_op.rhs);
- if (ty.isUnsignedInt()) {
+ if (ty.isUnsignedInt(mod)) {
const result = try (try func.binOp(lhs, rhs, ty, .div)).toLocal(func, ty);
return func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
- } else if (ty.isSignedInt()) {
- const int_bits = ty.intInfo(func.target).bits;
+ } else if (ty.isSignedInt(mod)) {
+ const int_bits = ty.intInfo(mod).bits;
const wasm_bits = toWasmBits(int_bits) orelse {
return func.fail("TODO: `@divFloor` for signed integers larger than '{d}' bits", .{int_bits});
};
@@ -6054,7 +6505,8 @@ fn airDivFloor(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn divSigned(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type) InnerError!WValue {
- const int_bits = ty.intInfo(func.target).bits;
+ const mod = func.bin_file.base.options.module.?;
+ const int_bits = ty.intInfo(mod).bits;
const wasm_bits = toWasmBits(int_bits) orelse {
return func.fail("TODO: Implement signed division for integers with bitsize '{d}'", .{int_bits});
};
@@ -6081,7 +6533,8 @@ fn divSigned(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type) InnerError!WVal
/// Retrieves the absolute value of a signed integer
/// NOTE: Leaves the result value on the stack.
fn signAbsValue(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue {
- const int_bits = ty.intInfo(func.target).bits;
+ const mod = func.bin_file.base.options.module.?;
+ const int_bits = ty.intInfo(mod).bits;
const wasm_bits = toWasmBits(int_bits) orelse {
return func.fail("TODO: signAbsValue for signed integers larger than '{d}' bits", .{int_bits});
};
@@ -6116,11 +6569,12 @@ fn airSatBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
assert(op == .add or op == .sub);
const bin_op = func.air.instructions.items(.data)[inst].bin_op;
- const ty = func.air.typeOfIndex(inst);
+ const mod = func.bin_file.base.options.module.?;
+ const ty = func.typeOfIndex(inst);
const lhs = try func.resolveInst(bin_op.lhs);
const rhs = try func.resolveInst(bin_op.rhs);
- const int_info = ty.intInfo(func.target);
+ const int_info = ty.intInfo(mod);
const is_signed = int_info.signedness == .signed;
if (int_info.bits > 64) {
@@ -6163,7 +6617,8 @@ fn airSatBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
}
fn signedSat(func: *CodeGen, lhs_operand: WValue, rhs_operand: WValue, ty: Type, op: Op) InnerError!WValue {
- const int_info = ty.intInfo(func.target);
+ const mod = func.bin_file.base.options.module.?;
+ const int_info = ty.intInfo(mod);
const wasm_bits = toWasmBits(int_info.bits).?;
const is_wasm_bits = wasm_bits == int_info.bits;
@@ -6228,8 +6683,9 @@ fn signedSat(func: *CodeGen, lhs_operand: WValue, rhs_operand: WValue, ty: Type,
fn airShlSat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const bin_op = func.air.instructions.items(.data)[inst].bin_op;
- const ty = func.air.typeOfIndex(inst);
- const int_info = ty.intInfo(func.target);
+ const mod = func.bin_file.base.options.module.?;
+ const ty = func.typeOfIndex(inst);
+ const int_info = ty.intInfo(mod);
const is_signed = int_info.signedness == .signed;
if (int_info.bits > 64) {
return func.fail("TODO: Saturating shifting left for integers with bitsize '{d}'", .{int_info.bits});
@@ -6337,7 +6793,7 @@ fn airShlSat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
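+ /// Lowers a call to a compiler-rt intrinsic by name. Parameter types are
+ /// now passed as `InternPool.Index` values (e.g. `.i64_type`) rather than
+ /// `Type`s; arguments are always lowered according to the C ABI.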
fn callIntrinsic(
func: *CodeGen,
name: []const u8,
- param_types: []const Type,
+ param_types: []const InternPool.Index,
return_type: Type,
args: []const WValue,
) InnerError!WValue {
@@ -6347,12 +6803,13 @@ fn callIntrinsic(
};
// Always pass over C-ABI
- var func_type = try genFunctype(func.gpa, .C, param_types, return_type, func.target);
+ const mod = func.bin_file.base.options.module.?;
+ var func_type = try genFunctype(func.gpa, .C, param_types, return_type, mod);
defer func_type.deinit(func.gpa);
const func_type_index = try func.bin_file.putOrGetFuncType(func_type);
try func.bin_file.addOrUpdateImport(name, symbol_index, null, func_type_index);
- const want_sret_param = firstParamSRet(.C, return_type, func.target);
+ const want_sret_param = firstParamSRet(.C, return_type, mod);
// if we want return as first param, we allocate a pointer to stack,
// and emit it as our first argument
const sret = if (want_sret_param) blk: {
@@ -6364,16 +6821,16 @@ fn callIntrinsic(
// Lower all arguments to the stack before we call our function
for (args, 0..) |arg, arg_i| {
assert(!(want_sret_param and arg == .stack));
- assert(param_types[arg_i].hasRuntimeBitsIgnoreComptime());
- try func.lowerArg(.C, param_types[arg_i], arg);
+ assert(param_types[arg_i].toType().hasRuntimeBitsIgnoreComptime(mod));
+ try func.lowerArg(.C, param_types[arg_i].toType(), arg);
}
// Actually call our intrinsic
try func.addLabel(.call, symbol_index);
- if (!return_type.hasRuntimeBitsIgnoreComptime()) {
+ if (!return_type.hasRuntimeBitsIgnoreComptime(mod)) {
return WValue.none;
- } else if (return_type.isNoReturn()) {
+ } else if (return_type.isNoReturn(mod)) {
try func.addTag(.@"unreachable");
return WValue.none;
} else if (want_sret_param) {
@@ -6386,11 +6843,11 @@ fn callIntrinsic(
fn airTagName(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const un_op = func.air.instructions.items(.data)[inst].un_op;
const operand = try func.resolveInst(un_op);
- const enum_ty = func.air.typeOf(un_op);
+ const enum_ty = func.typeOf(un_op);
const func_sym_index = try func.getTagNameFunction(enum_ty);
- const result_ptr = try func.allocStack(func.air.typeOfIndex(inst));
+ const result_ptr = try func.allocStack(func.typeOfIndex(inst));
try func.lowerToStack(result_ptr);
try func.emitWValue(operand);
try func.addLabel(.call, func_sym_index);
@@ -6399,15 +6856,14 @@ fn airTagName(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
- const enum_decl_index = enum_ty.getOwnerDecl();
- const module = func.bin_file.base.options.module.?;
+ const mod = func.bin_file.base.options.module.?;
+ const enum_decl_index = enum_ty.getOwnerDecl(mod);
var arena_allocator = std.heap.ArenaAllocator.init(func.gpa);
defer arena_allocator.deinit();
const arena = arena_allocator.allocator();
- const fqn = try module.declPtr(enum_decl_index).getFullyQualifiedName(module);
- defer module.gpa.free(fqn);
+ const fqn = mod.intern_pool.stringToSlice(try mod.declPtr(enum_decl_index).getFullyQualifiedName(mod));
const func_name = try std.fmt.allocPrintZ(arena, "__zig_tag_name_{s}", .{fqn});
// check if we already generated code for this.
@@ -6415,10 +6871,9 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
return loc.index;
}
- var int_tag_type_buffer: Type.Payload.Bits = undefined;
- const int_tag_ty = enum_ty.intTagType(&int_tag_type_buffer);
+ const int_tag_ty = enum_ty.intTagType(mod);
- if (int_tag_ty.bitSize(func.target) > 64) {
+ if (int_tag_ty.bitSize(mod) > 64) {
return func.fail("TODO: Implement @tagName for enums with tag size larger than 64 bits", .{});
}
@@ -6438,36 +6893,22 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
// TODO: Make the switch implementation generic so we can use a jump table when the tags are not sparse.
// Generate an if-else chain for each tag, along with an unnamed constant holding its name.
- for (enum_ty.enumFields().keys(), 0..) |tag_name, field_index| {
+ for (enum_ty.enumFields(mod), 0..) |tag_name_ip, field_index_usize| {
+ const field_index = @intCast(u32, field_index_usize);
+ const tag_name = mod.intern_pool.stringToSlice(tag_name_ip);
// for each tag name, create an unnamed const,
// and then get a pointer to its value.
- var name_ty_payload: Type.Payload.Len = .{
- .base = .{ .tag = .array_u8_sentinel_0 },
- .data = @intCast(u64, tag_name.len),
- };
- const name_ty = Type.initPayload(&name_ty_payload.base);
- const string_bytes = &module.string_literal_bytes;
- try string_bytes.ensureUnusedCapacity(module.gpa, tag_name.len);
- const gop = try module.string_literal_table.getOrPutContextAdapted(module.gpa, tag_name, Module.StringLiteralAdapter{
- .bytes = string_bytes,
- }, Module.StringLiteralContext{
- .bytes = string_bytes,
+ const name_ty = try mod.arrayType(.{
+ .len = tag_name.len,
+ .child = .u8_type,
+ .sentinel = .zero_u8,
});
- if (!gop.found_existing) {
- gop.key_ptr.* = .{
- .index = @intCast(u32, string_bytes.items.len),
- .len = @intCast(u32, tag_name.len),
- };
- string_bytes.appendSliceAssumeCapacity(tag_name);
- gop.value_ptr.* = .none;
- }
- var name_val_payload: Value.Payload.StrLit = .{
- .base = .{ .tag = .str_lit },
- .data = gop.key_ptr.*,
- };
- const name_val = Value.initPayload(&name_val_payload.base);
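+ // The tag name is now interned directly as a `[len:0]u8` aggregate,
+ // replacing the old string-literal table bookkeeping above.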
+ const name_val = try mod.intern(.{ .aggregate = .{
+ .ty = name_ty.toIntern(),
+ .storage = .{ .bytes = tag_name },
+ } });
const tag_sym_index = try func.bin_file.lowerUnnamedConst(
- .{ .ty = name_ty, .val = name_val },
+ .{ .ty = name_ty, .val = name_val.toValue() },
enum_decl_index,
);
@@ -6479,11 +6920,8 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
try writer.writeByte(std.wasm.opcode(.local_get));
try leb.writeULEB128(writer, @as(u32, 1));
- var tag_val_payload: Value.Payload.U32 = .{
- .base = .{ .tag = .enum_field_index },
- .data = @intCast(u32, field_index),
- };
- const tag_value = try func.lowerConstant(Value.initPayload(&tag_val_payload.base), enum_ty);
+ const tag_val = try mod.enumValueFieldIndex(enum_ty, field_index);
+ const tag_value = try func.lowerConstant(tag_val, enum_ty);
switch (tag_value) {
.imm32 => |value| {
@@ -6568,27 +7006,27 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
// finish function body
try writer.writeByte(std.wasm.opcode(.end));
- const slice_ty = Type.initTag(.const_slice_u8_sentinel_0);
- const func_type = try genFunctype(arena, .Unspecified, &.{int_tag_ty}, slice_ty, func.target);
+ const slice_ty = Type.slice_const_u8_sentinel_0;
+ const func_type = try genFunctype(arena, .Unspecified, &.{int_tag_ty.ip_index}, slice_ty, mod);
return func.bin_file.createFunction(func_name, func_type, &body_list, &relocs);
}
fn airErrorSetHasValue(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
const operand = try func.resolveInst(ty_op.operand);
const error_set_ty = func.air.getRefType(ty_op.ty);
const result = try func.allocLocal(Type.bool);
- const names = error_set_ty.errorSetNames();
+ const names = error_set_ty.errorSetNames(mod);
var values = try std.ArrayList(u32).initCapacity(func.gpa, names.len);
defer values.deinit();
- const module = func.bin_file.base.options.module.?;
var lowest: ?u32 = null;
var highest: ?u32 = null;
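// Error values are their index in the module's global error set; track
// the smallest and largest values present in this error set.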
for (names) |name| {
- const err_int = module.global_error_set.get(name).?;
+ const err_int = @intCast(Module.ErrorInt, mod.global_error_set.getIndex(name).?);
if (lowest) |*l| {
if (err_int < l.*) {
l.* = err_int;
@@ -6659,12 +7097,13 @@ inline fn useAtomicFeature(func: *const CodeGen) bool {
}
fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
const extra = func.air.extraData(Air.Cmpxchg, ty_pl.payload).data;
- const ptr_ty = func.air.typeOf(extra.ptr);
- const ty = ptr_ty.childType();
- const result_ty = func.air.typeOfIndex(inst);
+ const ptr_ty = func.typeOf(extra.ptr);
+ const ty = ptr_ty.childType(mod);
+ const result_ty = func.typeOfIndex(inst);
const ptr_operand = try func.resolveInst(extra.ptr);
const expected_val = try func.resolveInst(extra.expected_value);
@@ -6677,7 +7116,7 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.emitWValue(ptr_operand);
try func.lowerToStack(expected_val);
try func.lowerToStack(new_val);
- try func.addAtomicMemArg(switch (ty.abiSize(func.target)) {
+ try func.addAtomicMemArg(switch (ty.abiSize(mod)) {
1 => .i32_atomic_rmw8_cmpxchg_u,
2 => .i32_atomic_rmw16_cmpxchg_u,
4 => .i32_atomic_rmw_cmpxchg,
@@ -6685,14 +7124,14 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
else => |size| return func.fail("TODO: implement `@cmpxchg` for types with abi size '{d}'", .{size}),
}, .{
.offset = ptr_operand.offset(),
- .alignment = ty.abiAlignment(func.target),
+ .alignment = ty.abiAlignment(mod),
});
try func.addLabel(.local_tee, val_local.local.value);
_ = try func.cmp(.stack, expected_val, ty, .eq);
try func.addLabel(.local_set, cmp_result.local.value);
break :val val_local;
} else val: {
- if (ty.abiSize(func.target) > 8) {
+ if (ty.abiSize(mod) > 8) {
return func.fail("TODO: Implement `@cmpxchg` for types larger than abi size of 8 bytes", .{});
}
const ptr_val = try WValue.toLocal(try func.load(ptr_operand, ty, 0), func, ty);
@@ -6708,7 +7147,7 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
break :val ptr_val;
};
- const result_ptr = if (isByRef(result_ty, func.target)) val: {
+ const result_ptr = if (isByRef(result_ty, mod)) val: {
try func.emitWValue(cmp_result);
try func.addImm32(-1);
try func.addTag(.i32_xor);
@@ -6716,7 +7155,7 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.addTag(.i32_and);
const and_result = try WValue.toLocal(.stack, func, Type.bool);
const result_ptr = try func.allocStack(result_ty);
- try func.store(result_ptr, and_result, Type.bool, @intCast(u32, ty.abiSize(func.target)));
+ try func.store(result_ptr, and_result, Type.bool, @intCast(u32, ty.abiSize(mod)));
try func.store(result_ptr, ptr_val, ty, 0);
break :val result_ptr;
} else val: {
@@ -6727,16 +7166,17 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
break :val try WValue.toLocal(.stack, func, result_ty);
};
- return func.finishAir(inst, result_ptr, &.{ extra.ptr, extra.new_value, extra.expected_value });
+ return func.finishAir(inst, result_ptr, &.{ extra.ptr, extra.expected_value, extra.new_value });
}
fn airAtomicLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const atomic_load = func.air.instructions.items(.data)[inst].atomic_load;
const ptr = try func.resolveInst(atomic_load.ptr);
- const ty = func.air.typeOfIndex(inst);
+ const ty = func.typeOfIndex(inst);
if (func.useAtomicFeature()) {
- const tag: wasm.AtomicsOpcode = switch (ty.abiSize(func.target)) {
+ const tag: wasm.AtomicsOpcode = switch (ty.abiSize(mod)) {
1 => .i32_atomic_load8_u,
2 => .i32_atomic_load16_u,
4 => .i32_atomic_load,
@@ -6746,7 +7186,7 @@ fn airAtomicLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.emitWValue(ptr);
try func.addAtomicMemArg(tag, .{
.offset = ptr.offset(),
- .alignment = ty.abiAlignment(func.target),
+ .alignment = ty.abiAlignment(mod),
});
} else {
_ = try func.load(ptr, ty, 0);
@@ -6757,12 +7197,13 @@ fn airAtomicLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const pl_op = func.air.instructions.items(.data)[inst].pl_op;
const extra = func.air.extraData(Air.AtomicRmw, pl_op.payload).data;
const ptr = try func.resolveInst(pl_op.operand);
const operand = try func.resolveInst(extra.operand);
- const ty = func.air.typeOfIndex(inst);
+ const ty = func.typeOfIndex(inst);
const op: std.builtin.AtomicRmwOp = extra.op();
if (func.useAtomicFeature()) {
@@ -6780,7 +7221,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.emitWValue(ptr);
try func.emitWValue(value);
if (op == .Nand) {
- const wasm_bits = toWasmBits(@intCast(u16, ty.bitSize(func.target))).?;
+ const wasm_bits = toWasmBits(@intCast(u16, ty.bitSize(mod))).?;
const and_res = try func.binOp(value, operand, ty, .@"and");
if (wasm_bits == 32)
@@ -6797,7 +7238,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.addTag(.select);
}
try func.addAtomicMemArg(
- switch (ty.abiSize(func.target)) {
+ switch (ty.abiSize(mod)) {
1 => .i32_atomic_rmw8_cmpxchg_u,
2 => .i32_atomic_rmw16_cmpxchg_u,
4 => .i32_atomic_rmw_cmpxchg,
@@ -6806,7 +7247,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
},
.{
.offset = ptr.offset(),
- .alignment = ty.abiAlignment(func.target),
+ .alignment = ty.abiAlignment(mod),
},
);
const select_res = try func.allocLocal(ty);
@@ -6825,7 +7266,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
else => {
try func.emitWValue(ptr);
try func.emitWValue(operand);
- const tag: wasm.AtomicsOpcode = switch (ty.abiSize(func.target)) {
+ const tag: wasm.AtomicsOpcode = switch (ty.abiSize(mod)) {
1 => switch (op) {
.Xchg => .i32_atomic_rmw8_xchg_u,
.Add => .i32_atomic_rmw8_add_u,
@@ -6866,7 +7307,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
};
try func.addAtomicMemArg(tag, .{
.offset = ptr.offset(),
- .alignment = ty.abiAlignment(func.target),
+ .alignment = ty.abiAlignment(mod),
});
const result = try WValue.toLocal(.stack, func, ty);
return func.finishAir(inst, result, &.{ pl_op.operand, extra.operand });
@@ -6895,7 +7336,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
.Xor => .xor,
else => unreachable,
});
- if (ty.isInt() and (op == .Add or op == .Sub)) {
+ if (ty.isInt(mod) and (op == .Add or op == .Sub)) {
_ = try func.wrapOperand(.stack, ty);
}
try func.store(.stack, .stack, ty, ptr.offset());
@@ -6911,7 +7352,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.store(.stack, .stack, ty, ptr.offset());
},
.Nand => {
- const wasm_bits = toWasmBits(@intCast(u16, ty.bitSize(func.target))).?;
+ const wasm_bits = toWasmBits(@intCast(u16, ty.bitSize(mod))).?;
try func.emitWValue(ptr);
const and_res = try func.binOp(result, operand, ty, .@"and");
@@ -6942,15 +7383,16 @@ fn airFence(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airAtomicStore(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const bin_op = func.air.instructions.items(.data)[inst].bin_op;
const ptr = try func.resolveInst(bin_op.lhs);
const operand = try func.resolveInst(bin_op.rhs);
- const ptr_ty = func.air.typeOf(bin_op.lhs);
- const ty = ptr_ty.childType();
+ const ptr_ty = func.typeOf(bin_op.lhs);
+ const ty = ptr_ty.childType(mod);
if (func.useAtomicFeature()) {
- const tag: wasm.AtomicsOpcode = switch (ty.abiSize(func.target)) {
+ const tag: wasm.AtomicsOpcode = switch (ty.abiSize(mod)) {
1 => .i32_atomic_store8,
2 => .i32_atomic_store16,
4 => .i32_atomic_store,
@@ -6961,7 +7403,7 @@ fn airAtomicStore(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.lowerToStack(operand);
try func.addAtomicMemArg(tag, .{
.offset = ptr.offset(),
- .alignment = ty.abiAlignment(func.target),
+ .alignment = ty.abiAlignment(mod),
});
} else {
try func.store(ptr, operand, ty, 0);
@@ -6969,3 +7411,22 @@ fn airAtomicStore(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
}
+
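+/// Implements the `frame_addr` AIR instruction (`@frameAddress`): returns
+/// the bottom of the current stack frame, setting up the stack machinery
+/// first if this function has not touched the stack yet.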
+fn airFrameAddress(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ if (func.initial_stack_value == .none) {
+ try func.initializeStack();
+ }
+ try func.emitWValue(func.bottom_stack_value);
+ const result = try WValue.toLocal(.stack, func, Type.usize);
+ return func.finishAir(inst, result, &.{});
+}
+
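+/// Helper around `Air.typeOf`, which now requires the module's
+/// `InternPool` to resolve interned types. `typeOfIndex` below does the
+/// same for instruction indices.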
+fn typeOf(func: *CodeGen, inst: Air.Inst.Ref) Type {
+ const mod = func.bin_file.base.options.module.?;
+ return func.air.typeOf(inst, &mod.intern_pool);
+}
+
+fn typeOfIndex(func: *CodeGen, inst: Air.Inst.Index) Type {
+ const mod = func.bin_file.base.options.module.?;
+ return func.air.typeOfIndex(inst, &mod.intern_pool);
+}