author     Andrew Kelley <andrew@ziglang.org>  2025-01-16 04:20:41 -0500
committer  GitHub <noreply@github.com>         2025-01-16 04:20:41 -0500
commit     d4fe4698d9ff865ed1dc7e0163f2d5fcbe2b45a6 (patch)
tree       160d596e8ab0ab9568dac3f026c2ce42ad1c935e /src/arch
parent     77273103a8f9895ceab28287dffcf4d4c6fcb91b (diff)
parent     eda8b6e137a10f398cd292b533e924960f7fc409 (diff)
Merge pull request #22220 from ziglang/wasm-linker
wasm linker: aggressive rewrite towards Data-Oriented Design
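
The rewrite moves toward struct-of-arrays storage, the same layout the backends in this diff already consume through `self.air.instructions.items(.tag)` / `.items(.data)`. As a rough illustration only (the `Inst` type and its fields below are made up for this sketch, not the linker's actual data structures), `std.MultiArrayList` keeps each field in its own contiguous array, so a pass that scans one field never touches the others:

    const std = @import("std");

    // Hypothetical instruction record; field names are illustrative only.
    const Inst = struct {
        tag: Tag,
        data: u32,

        const Tag = enum(u8) { add, load, store };
    };

    pub fn main() !void {
        var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
        defer _ = gpa_state.deinit();
        const gpa = gpa_state.allocator();

        // MultiArrayList stores tags and data in separate dense arrays
        // (struct-of-arrays) rather than one array of structs.
        var insts: std.MultiArrayList(Inst) = .{};
        defer insts.deinit(gpa);

        try insts.append(gpa, .{ .tag = .add, .data = 42 });
        try insts.append(gpa, .{ .tag = .load, .data = 7 });

        // A pass that only needs tags walks one contiguous array.
        for (insts.items(.tag)) |tag| std.debug.print("{s}\n", .{@tagName(tag)});
    }
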
Diffstat (limited to 'src/arch')
-rw-r--r-- | src/arch/aarch64/CodeGen.zig |  298
-rw-r--r-- | src/arch/aarch64/Emit.zig    |    6
-rw-r--r-- | src/arch/arm/CodeGen.zig     |   46
-rw-r--r-- | src/arch/arm/Emit.zig        |    6
-rw-r--r-- | src/arch/riscv64/CodeGen.zig |  109
-rw-r--r-- | src/arch/riscv64/Emit.zig    |   17
-rw-r--r-- | src/arch/sparc64/CodeGen.zig |   52
-rw-r--r-- | src/arch/sparc64/Emit.zig    |    9
-rw-r--r-- | src/arch/wasm/CodeGen.zig    | 6625
-rw-r--r-- | src/arch/wasm/Emit.zig       | 1540
-rw-r--r-- | src/arch/wasm/Mir.zig        |  440
-rw-r--r-- | src/arch/wasm/abi.zig        |    2
-rw-r--r-- | src/arch/x86_64/CodeGen.zig  |  121
-rw-r--r-- | src/arch/x86_64/Emit.zig     |   15
14 files changed, 4709 insertions, 4577 deletions
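
A recurring mechanical change in the diff below: `generate` now writes into a `*std.ArrayListUnmanaged(u8)` and returns `CodeGenError!void`, failures are routed through `Zcu.codegenFail`/`codegenFailMsg` instead of a `codegen.Result` union, and emitters pass the allocator on every growing call (`code.addManyAsArray(gpa, 4)`). A minimal sketch of the unmanaged-list pattern, assuming a hypothetical `writeWord` helper that is not code from this PR:

    const std = @import("std");

    // The unmanaged list stores no allocator, so each growing operation
    // takes `gpa` explicitly, as Emit.writeInstruction does in the diff.
    fn writeWord(gpa: std.mem.Allocator, code: *std.ArrayListUnmanaged(u8), word: u32) !void {
        std.mem.writeInt(u32, try code.addManyAsArray(gpa, 4), word, .little);
    }

    test "writeWord appends four bytes" {
        const gpa = std.testing.allocator;
        var code: std.ArrayListUnmanaged(u8) = .{};
        defer code.deinit(gpa);

        try writeWord(gpa, &code, 0xd503201f); // AArch64 nop encoding
        try std.testing.expectEqual(@as(usize, 4), code.items.len);
    }
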
diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index 8fd27d4bb7..33f6919592 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -24,7 +24,6 @@ const build_options = @import("build_options");
const Alignment = InternPool.Alignment;
const CodeGenError = codegen.CodeGenError;
-const Result = codegen.Result;
const bits = @import("bits.zig");
const abi = @import("abi.zig");
@@ -51,7 +50,6 @@ debug_output: link.File.DebugInfoOutput,
target: *const std.Target,
func_index: InternPool.Index,
owner_nav: InternPool.Nav.Index,
-err_msg: ?*ErrorMsg,
args: []MCValue,
ret_mcv: MCValue,
fn_type: Type,
@@ -325,9 +323,9 @@ pub fn generate(
func_index: InternPool.Index,
air: Air,
liveness: Liveness,
- code: *std.ArrayList(u8),
+ code: *std.ArrayListUnmanaged(u8),
debug_output: link.File.DebugInfoOutput,
-) CodeGenError!Result {
+) CodeGenError!void {
const zcu = pt.zcu;
const gpa = zcu.gpa;
const func = zcu.funcInfo(func_index);
@@ -353,7 +351,6 @@ pub fn generate(
.bin_file = lf,
.func_index = func_index,
.owner_nav = func.owner_nav,
- .err_msg = null,
.args = undefined, // populated after `resolveCallingConventionValues`
.ret_mcv = undefined, // populated after `resolveCallingConventionValues`
.fn_type = fn_type,
@@ -370,10 +367,7 @@ pub fn generate(
defer function.dbg_info_relocs.deinit(gpa);
var call_info = function.resolveCallingConventionValues(fn_type) catch |err| switch (err) {
- error.CodegenFail => return Result{ .fail = function.err_msg.? },
- error.OutOfRegisters => return Result{
- .fail = try ErrorMsg.create(gpa, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}),
- },
+ error.CodegenFail => return error.CodegenFail,
else => |e| return e,
};
defer call_info.deinit(&function);
@@ -384,24 +378,23 @@ pub fn generate(
function.max_end_stack = call_info.stack_byte_count;
function.gen() catch |err| switch (err) {
- error.CodegenFail => return Result{ .fail = function.err_msg.? },
- error.OutOfRegisters => return Result{
- .fail = try ErrorMsg.create(gpa, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}),
- },
+ error.CodegenFail => return error.CodegenFail,
+ error.OutOfRegisters => return function.fail("ran out of registers (Zig compiler bug)", .{}),
else => |e| return e,
};
for (function.dbg_info_relocs.items) |reloc| {
- try reloc.genDbgInfo(function);
+ reloc.genDbgInfo(function) catch |err|
+ return function.fail("failed to generate debug info: {s}", .{@errorName(err)});
}
- var mir = Mir{
+ var mir: Mir = .{
.instructions = function.mir_instructions.toOwnedSlice(),
.extra = try function.mir_extra.toOwnedSlice(gpa),
};
defer mir.deinit(gpa);
- var emit = Emit{
+ var emit: Emit = .{
.mir = mir,
.bin_file = lf,
.debug_output = debug_output,
@@ -417,15 +410,9 @@ pub fn generate(
defer emit.deinit();
emit.emitMir() catch |err| switch (err) {
- error.EmitFail => return Result{ .fail = emit.err_msg.? },
+ error.EmitFail => return function.failMsg(emit.err_msg.?),
else => |e| return e,
};
-
- if (function.err_msg) |em| {
- return Result{ .fail = em };
- } else {
- return Result.ok;
- }
}
fn addInst(self: *Self, inst: Mir.Inst) error{OutOfMemory}!Mir.Inst.Index {
@@ -567,7 +554,7 @@ fn gen(self: *Self) !void {
.data = .{ .rr_imm12_sh = .{ .rd = .sp, .rn = .sp, .imm12 = size } },
});
} else {
- return self.failSymbol("TODO AArch64: allow larger stacks", .{});
+ @panic("TODO AArch64: allow larger stacks");
}
_ = try self.addInst(.{
@@ -723,7 +710,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.cmp_gt => try self.airCmp(inst, .gt),
.cmp_neq => try self.airCmp(inst, .neq),
- .cmp_vector => try self.airCmpVector(inst),
+ .cmp_vector => try self.airCmpVector(inst),
.cmp_lt_errors_len => try self.airCmpLtErrorsLen(inst),
.alloc => try self.airAlloc(inst),
@@ -744,7 +731,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.fpext => try self.airFpext(inst),
.intcast => try self.airIntCast(inst),
.trunc => try self.airTrunc(inst),
- .int_from_bool => try self.airIntFromBool(inst),
+ .int_from_bool => try self.airIntFromBool(inst),
.is_non_null => try self.airIsNonNull(inst),
.is_non_null_ptr => try self.airIsNonNullPtr(inst),
.is_null => try self.airIsNull(inst),
@@ -756,7 +743,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.load => try self.airLoad(inst),
.loop => try self.airLoop(inst),
.not => try self.airNot(inst),
- .int_from_ptr => try self.airIntFromPtr(inst),
+ .int_from_ptr => try self.airIntFromPtr(inst),
.ret => try self.airRet(inst),
.ret_safe => try self.airRet(inst), // TODO
.ret_load => try self.airRetLoad(inst),
@@ -765,8 +752,8 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.struct_field_ptr=> try self.airStructFieldPtr(inst),
.struct_field_val=> try self.airStructFieldVal(inst),
.array_to_slice => try self.airArrayToSlice(inst),
- .float_from_int => try self.airFloatFromInt(inst),
- .int_from_float => try self.airIntFromFloat(inst),
+ .float_from_int => try self.airFloatFromInt(inst),
+ .int_from_float => try self.airIntFromFloat(inst),
.cmpxchg_strong => try self.airCmpxchg(inst),
.cmpxchg_weak => try self.airCmpxchg(inst),
.atomic_rmw => try self.airAtomicRmw(inst),
@@ -1107,7 +1094,7 @@ fn spillCompareFlagsIfOccupied(self: *Self) !void {
/// Copies a value to a register without tracking the register. The register is not considered
/// allocated. A second call to `copyToTmpRegister` may return the same register.
/// This can have a side effect of spilling instructions to the stack to free up a register.
-fn copyToTmpRegister(self: *Self, ty: Type, mcv: MCValue) !Register {
+fn copyToTmpRegister(self: *Self, ty: Type, mcv: MCValue) InnerError!Register {
const raw_reg = try self.register_manager.allocReg(null, gp);
const reg = self.registerAlias(raw_reg, ty);
try self.genSetReg(ty, reg, mcv);
@@ -1125,12 +1112,12 @@ fn copyToNewRegister(self: *Self, reg_owner: Air.Inst.Index, mcv: MCValue) !MCVa
return MCValue{ .register = reg };
}
-fn airAlloc(self: *Self, inst: Air.Inst.Index) !void {
+fn airAlloc(self: *Self, inst: Air.Inst.Index) InnerError!void {
const stack_offset = try self.allocMemPtr(inst);
return self.finishAir(inst, .{ .ptr_stack_offset = stack_offset }, .{ .none, .none, .none });
}
-fn airRetPtr(self: *Self, inst: Air.Inst.Index) !void {
+fn airRetPtr(self: *Self, inst: Air.Inst.Index) InnerError!void {
const pt = self.pt;
const zcu = pt.zcu;
const result: MCValue = switch (self.ret_mcv) {
@@ -1152,19 +1139,19 @@ fn airRetPtr(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ .none, .none, .none });
}
-fn airFptrunc(self: *Self, inst: Air.Inst.Index) !void {
+fn airFptrunc(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airFptrunc for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
-fn airFpext(self: *Self, inst: Air.Inst.Index) !void {
+fn airFpext(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airFpext for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
-fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
+fn airIntCast(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
if (self.liveness.isUnused(inst))
return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
@@ -1293,7 +1280,7 @@ fn trunc(
}
}
-fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
+fn airTrunc(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try self.resolveInst(ty_op.operand);
const operand_ty = self.typeOf(ty_op.operand);
@@ -1306,14 +1293,14 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
-fn airIntFromBool(self: *Self, inst: Air.Inst.Index) !void {
+fn airIntFromBool(self: *Self, inst: Air.Inst.Index) InnerError!void {
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const operand = try self.resolveInst(un_op);
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else operand;
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
-fn airNot(self: *Self, inst: Air.Inst.Index) !void {
+fn airNot(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const pt = self.pt;
const zcu = pt.zcu;
@@ -1484,7 +1471,7 @@ fn minMax(
}
}
-fn airMinMax(self: *Self, inst: Air.Inst.Index) !void {
+fn airMinMax(self: *Self, inst: Air.Inst.Index) InnerError!void {
const tag = self.air.instructions.items(.tag)[@intFromEnum(inst)];
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs_ty = self.typeOf(bin_op.lhs);
@@ -1502,7 +1489,7 @@ fn airMinMax(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
-fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
+fn airSlice(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
@@ -2440,7 +2427,7 @@ fn ptrArithmetic(
}
}
-fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
+fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) InnerError!void {
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs_ty = self.typeOf(bin_op.lhs);
const rhs_ty = self.typeOf(bin_op.rhs);
@@ -2490,7 +2477,7 @@ fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
-fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
+fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) InnerError!void {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const lhs_ty = self.typeOf(bin_op.lhs);
@@ -2505,25 +2492,25 @@ fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
-fn airAddSat(self: *Self, inst: Air.Inst.Index) !void {
+fn airAddSat(self: *Self, inst: Air.Inst.Index) InnerError!void {
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement add_sat for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
-fn airSubSat(self: *Self, inst: Air.Inst.Index) !void {
+fn airSubSat(self: *Self, inst: Air.Inst.Index) InnerError!void {
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement sub_sat for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
-fn airMulSat(self: *Self, inst: Air.Inst.Index) !void {
+fn airMulSat(self: *Self, inst: Air.Inst.Index) InnerError!void {
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement mul_sat for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
-fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
+fn airOverflow(self: *Self, inst: Air.Inst.Index) InnerError!void {
const tag = self.air.instructions.items(.tag)[@intFromEnum(inst)];
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
@@ -2536,9 +2523,9 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
const rhs_ty = self.typeOf(extra.rhs);
const tuple_ty = self.typeOfIndex(inst);
- const tuple_size = @as(u32, @intCast(tuple_ty.abiSize(zcu)));
+ const tuple_size: u32 = @intCast(tuple_ty.abiSize(zcu));
const tuple_align = tuple_ty.abiAlignment(zcu);
- const overflow_bit_offset = @as(u32, @intCast(tuple_ty.structFieldOffset(1, zcu)));
+ const overflow_bit_offset: u32 = @intCast(tuple_ty.structFieldOffset(1, zcu));
switch (lhs_ty.zigTypeTag(zcu)) {
.vector => return self.fail("TODO implement add_with_overflow/sub_with_overflow for vectors", .{}),
@@ -2652,7 +2639,7 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none });
}
-fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
+fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ extra.lhs, extra.rhs, .none });
@@ -2876,7 +2863,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none });
}
-fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
+fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ extra.lhs, extra.rhs, .none });
@@ -3012,13 +2999,13 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none });
}
-fn airShlSat(self: *Self, inst: Air.Inst.Index) !void {
+fn airShlSat(self: *Self, inst: Air.Inst.Index) InnerError!void {
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement shl_sat for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
-fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) !void {
+fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const optional_ty = self.typeOf(ty_op.operand);
@@ -3055,13 +3042,13 @@ fn optionalPayload(self: *Self, inst: Air.Inst.Index, mcv: MCValue, optional_ty:
}
}
-fn airOptionalPayloadPtr(self: *Self, inst: Air.Inst.Index) !void {
+fn airOptionalPayloadPtr(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement .optional_payload_ptr for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
-fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
+fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement .optional_payload_ptr_set for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
@@ -3137,7 +3124,7 @@ fn errUnionErr(
}
}
-fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void {
+fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const error_union_bind: ReadArg.Bind = .{ .inst = ty_op.operand };
@@ -3218,7 +3205,7 @@ fn errUnionPayload(
}
}
-fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void {
+fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const error_union_bind: ReadArg.Bind = .{ .inst = ty_op.operand };
@@ -3230,26 +3217,26 @@ fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void {
}
// *(E!T) -> E
-fn airUnwrapErrErrPtr(self: *Self, inst: Air.Inst.Index) !void {
+fn airUnwrapErrErrPtr(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement unwrap error union error ptr for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
// *(E!T) -> *T
-fn airUnwrapErrPayloadPtr(self: *Self, inst: Air.Inst.Index) !void {
+fn airUnwrapErrPayloadPtr(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement unwrap error union payload ptr for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
-fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
+fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement .errunion_payload_ptr_set for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
-fn airErrReturnTrace(self: *Self, inst: Air.Inst.Index) !void {
+fn airErrReturnTrace(self: *Self, inst: Air.Inst.Index) InnerError!void {
const result: MCValue = if (self.liveness.isUnused(inst))
.dead
else
@@ -3257,17 +3244,17 @@ fn airErrReturnTrace(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ .none, .none, .none });
}
-fn airSetErrReturnTrace(self: *Self, inst: Air.Inst.Index) !void {
+fn airSetErrReturnTrace(self: *Self, inst: Air.Inst.Index) InnerError!void {
_ = inst;
return self.fail("TODO implement airSetErrReturnTrace for {}", .{self.target.cpu.arch});
}
-fn airSaveErrReturnTraceIndex(self: *Self, inst: Air.Inst.Index) !void {
+fn airSaveErrReturnTraceIndex(self: *Self, inst: Air.Inst.Index) InnerError!void {
_ = inst;
return self.fail("TODO implement airSaveErrReturnTraceIndex for {}", .{self.target.cpu.arch});
}
-fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
+fn airWrapOptional(self: *Self, inst: Air.Inst.Index) InnerError!void {
const pt = self.pt;
const zcu = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
@@ -3313,7 +3300,7 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
}
/// T to E!T
-fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {
+fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) InnerError!void {
const pt = self.pt;
const zcu = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
@@ -3338,7 +3325,7 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {
}
/// E to E!T
-fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
+fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const pt = self.pt;
@@ -3379,7 +3366,7 @@ fn slicePtr(mcv: MCValue) MCValue {
}
}
-fn airSlicePtr(self: *Self, inst: Air.Inst.Index) !void {
+fn airSlicePtr(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const mcv = try self.resolveInst(ty_op.operand);
@@ -3388,7 +3375,7 @@ fn airSlicePtr(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
-fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void {
+fn airSliceLen(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const ptr_bits = 64;
@@ -3412,7 +3399,7 @@ fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
-fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void {
+fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const ptr_bits = 64;
@@ -3429,7 +3416,7 @@ fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
-fn airPtrSlicePtrPtr(self: *Self, inst: Air.Inst.Index) !void {
+fn airPtrSlicePtrPtr(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const mcv = try self.resolveInst(ty_op.operand);
@@ -3444,7 +3431,7 @@ fn airPtrSlicePtrPtr(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
-fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
+fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) InnerError!void {
const pt = self.pt;
const zcu = pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
@@ -3487,7 +3474,7 @@ fn ptrElemVal(
}
}
-fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) !void {
+fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
@@ -3506,13 +3493,13 @@ fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none });
}
-fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
+fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) InnerError!void {
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement array_elem_val for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
-fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void {
+fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) InnerError!void {
const pt = self.pt;
const zcu = pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
@@ -3526,7 +3513,7 @@ fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
-fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void {
+fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
@@ -3542,55 +3529,55 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none });
}
-fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
+fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) InnerError!void {
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
_ = bin_op;
return self.fail("TODO implement airSetUnionTag for {}", .{self.target.cpu.arch});
}
-fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
+fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airGetUnionTag for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
-fn airClz(self: *Self, inst: Air.Inst.Index) !void {
+fn airClz(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airClz for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
-fn airCtz(self: *Self, inst: Air.Inst.Index) !void {
+fn airCtz(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airCtz for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
-fn airPopcount(self: *Self, inst: Air.Inst.Index) !void {
+fn airPopcount(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airPopcount for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
-fn airAbs(self: *Self, inst: Air.Inst.Index) !void {
+fn airAbs(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airAbs for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
-fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void {
+fn airByteSwap(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airByteSwap for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
-fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void {
+fn airBitReverse(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airBitReverse for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
-fn airUnaryMath(self: *Self, inst: Air.Inst.Index) !void {
+fn airUnaryMath(self: *Self, inst: Air.Inst.Index) InnerError!void {
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const result: MCValue = if (self.liveness.isUnused(inst))
.dead
@@ -3885,7 +3872,7 @@ fn genInlineMemsetCode(
// end:
}
-fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
+fn airLoad(self: *Self, inst: Air.Inst.Index) InnerError!void {
const pt = self.pt;
const zcu = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
@@ -4086,7 +4073,7 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
}
}
-fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
+fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) InnerError!void {
if (safety) {
// TODO if the value is undef, write 0xaa bytes to dest
} else {
@@ -4103,14 +4090,14 @@ fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
}
-fn airStructFieldPtr(self: *Self, inst: Air.Inst.Index) !void {
+fn airStructFieldPtr(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.StructField, ty_pl.payload).data;
const result = try self.structFieldPtr(inst, extra.struct_operand, extra.field_index);
return self.finishAir(inst, result, .{ extra.struct_operand, .none, .none });
}
-fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u8) !void {
+fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u8) InnerError!void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result = try self.structFieldPtr(inst, ty_op.operand, index);
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
@@ -4138,7 +4125,7 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde
};
}
-fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
+fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.StructField, ty_pl.payload).data;
const operand = extra.struct_operand;
@@ -4194,7 +4181,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ extra.struct_operand, .none, .none });
}
-fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void {
+fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) InnerError!void {
const pt = self.pt;
const zcu = pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
@@ -4218,7 +4205,7 @@ fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ extra.field_ptr, .none, .none });
}
-fn airArg(self: *Self, inst: Air.Inst.Index) !void {
+fn airArg(self: *Self, inst: Air.Inst.Index) InnerError!void {
// skip zero-bit arguments as they don't have a corresponding arg instruction
var arg_index = self.arg_index;
while (self.args[arg_index] == .none) arg_index += 1;
@@ -4238,7 +4225,7 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ .none, .none, .none });
}
-fn airTrap(self: *Self) !void {
+fn airTrap(self: *Self) InnerError!void {
_ = try self.addInst(.{
.tag = .brk,
.data = .{ .imm16 = 0x0001 },
@@ -4246,7 +4233,7 @@ fn airTrap(self: *Self) !void {
return self.finishAirBookkeeping();
}
-fn airBreakpoint(self: *Self) !void {
+fn airBreakpoint(self: *Self) InnerError!void {
_ = try self.addInst(.{
.tag = .brk,
.data = .{ .imm16 = 0xf000 },
@@ -4254,17 +4241,17 @@ fn airBreakpoint(self: *Self) !void {
return self.finishAirBookkeeping();
}
-fn airRetAddr(self: *Self, inst: Air.Inst.Index) !void {
+fn airRetAddr(self: *Self, inst: Air.Inst.Index) InnerError!void {
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airRetAddr for aarch64", .{});
return self.finishAir(inst, result, .{ .none, .none, .none });
}
-fn airFrameAddress(self: *Self, inst: Air.Inst.Index) !void {
+fn airFrameAddress(self: *Self, inst: Air.Inst.Index) InnerError!void {
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airFrameAddress for aarch64", .{});
return self.finishAir(inst, result, .{ .none, .none, .none });
}
-fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier) !void {
+fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier) InnerError!void {
if (modifier == .always_tail) return self.fail("TODO implement tail calls for aarch64", .{});
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const callee = pl_op.operand;
@@ -4422,7 +4409,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
return bt.finishAir(result);
}
-fn airRet(self: *Self, inst: Air.Inst.Index) !void {
+fn airRet(self: *Self, inst: Air.Inst.Index) InnerError!void {
const pt = self.pt;
const zcu = pt.zcu;
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
@@ -4455,7 +4442,7 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, .dead, .{ un_op, .none, .none });
}
-fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {
+fn airRetLoad(self: *Self, inst: Air.Inst.Index) InnerError!void {
const pt = self.pt;
const zcu = pt.zcu;
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
@@ -4499,7 +4486,7 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, .dead, .{ un_op, .none, .none });
}
-fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
+fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) InnerError!void {
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs_ty = self.typeOf(bin_op.lhs);
@@ -4597,12 +4584,12 @@ fn cmp(
}
}
-fn airCmpVector(self: *Self, inst: Air.Inst.Index) !void {
+fn airCmpVector(self: *Self, inst: Air.Inst.Index) InnerError!void {
_ = inst;
return self.fail("TODO implement airCmpVector for {}", .{self.target.cpu.arch});
}
-fn airCmpLtErrorsLen(self: *Self, inst: Air.Inst.Index) !void {
+fn airCmpLtErrorsLen(self: *Self, inst: Air.Inst.Index) InnerError!void {
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const operand = try self.resolveInst(un_op);
_ = operand;
@@ -4610,7 +4597,7 @@ fn airCmpLtErrorsLen(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
-fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void {
+fn airDbgStmt(self: *Self, inst: Air.Inst.Index) InnerError!void {
const dbg_stmt = self.air.instructions.items(.data)[@intFromEnum(inst)].dbg_stmt;
_ = try self.addInst(.{
@@ -4624,7 +4611,7 @@ fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAirBookkeeping();
}
-fn airDbgInlineBlock(self: *Self, inst: Air.Inst.Index) !void {
+fn airDbgInlineBlock(self: *Self, inst: Air.Inst.Index) InnerError!void {
const pt = self.pt;
const zcu = pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
@@ -4635,7 +4622,7 @@ fn airDbgInlineBlock(self: *Self, inst: Air.Inst.Index) !void {
try self.lowerBlock(inst, @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len]));
}
-fn airDbgVar(self: *Self, inst: Air.Inst.Index) !void {
+fn airDbgVar(self: *Self, inst: Air.Inst.Index) InnerError!void {
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const operand = pl_op.operand;
const tag = self.air.instructions.items(.tag)[@intFromEnum(inst)];
@@ -4686,7 +4673,7 @@ fn condBr(self: *Self, condition: MCValue) !Mir.Inst.Index {
}
}
-fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
+fn airCondBr(self: *Self, inst: Air.Inst.Index) InnerError!void {
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const cond = try self.resolveInst(pl_op.operand);
const extra = self.air.extraData(Air.CondBr, pl_op.payload);
@@ -4919,7 +4906,7 @@ fn isNonErr(
}
}
-fn airIsNull(self: *Self, inst: Air.Inst.Index) !void {
+fn airIsNull(self: *Self, inst: Air.Inst.Index) InnerError!void {
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const operand = try self.resolveInst(un_op);
@@ -4930,7 +4917,7 @@ fn airIsNull(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
-fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void {
+fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) InnerError!void {
const pt = self.pt;
const zcu = pt.zcu;
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
@@ -4947,7 +4934,7 @@ fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
-fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void {
+fn airIsNonNull(self: *Self, inst: Air.Inst.Index) InnerError!void {
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const operand = try self.resolveInst(un_op);
@@ -4958,7 +4945,7 @@ fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
-fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void {
+fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) InnerError!void {
const pt = self.pt;
const zcu = pt.zcu;
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
@@ -4975,7 +4962,7 @@ fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
-fn airIsErr(self: *Self, inst: Air.Inst.Index) !void {
+fn airIsErr(self: *Self, inst: Air.Inst.Index) InnerError!void {
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const error_union_bind: ReadArg.Bind = .{ .inst = un_op };
@@ -4986,7 +4973,7 @@ fn airIsErr(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
-fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void {
+fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) InnerError!void {
const pt = self.pt;
const zcu = pt.zcu;
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
@@ -5003,7 +4990,7 @@ fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
-fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void {
+fn airIsNonErr(self: *Self, inst: Air.Inst.Index) InnerError!void {
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const error_union_bind: ReadArg.Bind = .{ .inst = un_op };
@@ -5014,7 +5001,7 @@ fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
-fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void {
+fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) InnerError!void {
const pt = self.pt;
const zcu = pt.zcu;
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
@@ -5031,7 +5018,7 @@ fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
-fn airLoop(self: *Self, inst: Air.Inst.Index) !void {
+fn airLoop(self: *Self, inst: Air.Inst.Index) InnerError!void {
// A loop is a setup to be able to jump back to the beginning.
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const loop = self.air.extraData(Air.Block, ty_pl.payload);
@@ -5052,7 +5039,7 @@ fn jump(self: *Self, inst: Mir.Inst.Index) !void {
});
}
-fn airBlock(self: *Self, inst: Air.Inst.Index) !void {
+fn airBlock(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Block, ty_pl.payload);
try self.lowerBlock(inst, @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len]));
@@ -5090,7 +5077,7 @@ fn lowerBlock(self: *Self, inst: Air.Inst.Index, body: []const Air.Inst.Index) !
return self.finishAir(inst, result, .{ .none, .none, .none });
}
-fn airSwitch(self: *Self, inst: Air.Inst.Index) !void {
+fn airSwitch(self: *Self, inst: Air.Inst.Index) InnerError!void {
const switch_br = self.air.unwrapSwitch(inst);
const condition_ty = self.typeOf(switch_br.operand);
const liveness = try self.liveness.getSwitchBr(
@@ -5224,7 +5211,7 @@ fn performReloc(self: *Self, inst: Mir.Inst.Index) !void {
}
}
-fn airBr(self: *Self, inst: Air.Inst.Index) !void {
+fn airBr(self: *Self, inst: Air.Inst.Index) InnerError!void {
const branch = self.air.instructions.items(.data)[@intFromEnum(inst)].br;
try self.br(branch.block_inst, branch.operand);
return self.finishAir(inst, .dead, .{ branch.operand, .none, .none });
@@ -5268,7 +5255,7 @@ fn brVoid(self: *Self, block: Air.Inst.Index) !void {
}));
}
-fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
+fn airAsm(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Asm, ty_pl.payload);
const is_volatile = @as(u1, @truncate(extra.data.flags >> 31)) != 0;
@@ -5601,7 +5588,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
.tag = .ldr_ptr_stack,
.data = .{ .load_store_stack = .{
.rt = reg,
- .offset = @as(u32, @intCast(off)),
+ .offset = @intCast(off),
} },
});
},
@@ -5617,13 +5604,13 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
.immediate => |x| {
_ = try self.addInst(.{
.tag = .movz,
- .data = .{ .r_imm16_sh = .{ .rd = reg, .imm16 = @as(u16, @truncate(x)) } },
+ .data = .{ .r_imm16_sh = .{ .rd = reg, .imm16 = @truncate(x) } },
});
if (x & 0x0000_0000_ffff_0000 != 0) {
_ = try self.addInst(.{
.tag = .movk,
- .data = .{ .r_imm16_sh = .{ .rd = reg, .imm16 = @as(u16, @truncate(x >> 16)), .hw = 1 } },
+ .data = .{ .r_imm16_sh = .{ .rd = reg, .imm16 = @truncate(x >> 16), .hw = 1 } },
});
}
@@ -5631,13 +5618,13 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
if (x & 0x0000_ffff_0000_0000 != 0) {
_ = try self.addInst(.{
.tag = .movk,
- .data = .{ .r_imm16_sh = .{ .rd = reg, .imm16 = @as(u16, @truncate(x >> 32)), .hw = 2 } },
+ .data = .{ .r_imm16_sh = .{ .rd = reg, .imm16 = @truncate(x >> 32), .hw = 2 } },
});
}
if (x & 0xffff_0000_0000_0000 != 0) {
_ = try self.addInst(.{
.tag = .movk,
- .data = .{ .r_imm16_sh = .{ .rd = reg, .imm16 = @as(u16, @truncate(x >> 48)), .hw = 3 } },
+ .data = .{ .r_imm16_sh = .{ .rd = reg, .imm16 = @truncate(x >> 48), .hw = 3 } },
});
}
}
@@ -5709,7 +5696,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
.tag = tag,
.data = .{ .load_store_stack = .{
.rt = reg,
- .offset = @as(u32, @intCast(off)),
+ .offset = @intCast(off),
} },
});
},
@@ -5733,7 +5720,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
.tag = tag,
.data = .{ .load_store_stack = .{
.rt = reg,
- .offset = @as(u32, @intCast(off)),
+ .offset = @intCast(off),
} },
});
},
@@ -5918,13 +5905,13 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I
}
}
-fn airIntFromPtr(self: *Self, inst: Air.Inst.Index) !void {
+fn airIntFromPtr(self: *Self, inst: Air.Inst.Index) InnerError!void {
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const result = try self.resolveInst(un_op);
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
-fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
+fn airBitCast(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result = if (self.liveness.isUnused(inst)) .dead else result: {
const operand = try self.resolveInst(ty_op.operand);
@@ -5945,7 +5932,7 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
-fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
+fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) InnerError!void {
const pt = self.pt;
const zcu = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
@@ -5963,7 +5950,7 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
-fn airFloatFromInt(self: *Self, inst: Air.Inst.Index) !void {
+fn airFloatFromInt(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airFloatFromInt for {}", .{
self.target.cpu.arch,
@@ -5971,7 +5958,7 @@ fn airFloatFromInt(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
-fn airIntFromFloat(self: *Self, inst: Air.Inst.Index) !void {
+fn airIntFromFloat(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airIntFromFloat for {}", .{
self.target.cpu.arch,
@@ -5979,7 +5966,7 @@ fn airIntFromFloat(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
-fn airCmpxchg(self: *Self, inst: Air.Inst.Index) !void {
+fn airCmpxchg(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Block, ty_pl.payload);
_ = extra;
@@ -5989,23 +5976,23 @@ fn airCmpxchg(self: *Self, inst: Air.Inst.Index) !void {
});
}
-fn airAtomicRmw(self: *Self, inst: Air.Inst.Index) !void {
+fn airAtomicRmw(self: *Self, inst: Air.Inst.Index) InnerError!void {
_ = inst;
return self.fail("TODO implement airCmpxchg for {}", .{self.target.cpu.arch});
}
-fn airAtomicLoad(self: *Self, inst: Air.Inst.Index) !void {
+fn airAtomicLoad(self: *Self, inst: Air.Inst.Index) InnerError!void {
_ = inst;
return self.fail("TODO implement airAtomicLoad for {}", .{self.target.cpu.arch});
}
-fn airAtomicStore(self: *Self, inst: Air.Inst.Index, order: std.builtin.AtomicOrder) !void {
+fn airAtomicStore(self: *Self, inst: Air.Inst.Index, order: std.builtin.AtomicOrder) InnerError!void {
_ = inst;
_ = order;
return self.fail("TODO implement airAtomicStore for {}", .{self.target.cpu.arch});
}
-fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
+fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) InnerError!void {
_ = inst;
if (safety) {
// TODO if the value is undef, write 0xaa bytes to dest
@@ -6015,12 +6002,12 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
return self.fail("TODO implement airMemset for {}", .{self.target.cpu.arch});
}
-fn airMemcpy(self: *Self, inst: Air.Inst.Index) !void {
+fn airMemcpy(self: *Self, inst: Air.Inst.Index) InnerError!void {
_ = inst;
return self.fail("TODO implement airMemcpy for {}", .{self.target.cpu.arch});
}
-fn airTagName(self: *Self, inst: Air.Inst.Index) !void {
+fn airTagName(self: *Self, inst: Air.Inst.Index) InnerError!void {
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const operand = try self.resolveInst(un_op);
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else {
@@ -6030,7 +6017,7 @@ fn airTagName(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
-fn airErrorName(self: *Self, inst: Air.Inst.Index) !void {
+fn airErrorName(self: *Self, inst: Air.Inst.Index) InnerError!void {
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const operand = try self.resolveInst(un_op);
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else {
@@ -6040,33 +6027,33 @@ fn airErrorName(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
-fn airSplat(self: *Self, inst: Air.Inst.Index) !void {
+fn airSplat(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airSplat for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
-fn airSelect(self: *Self, inst: Air.Inst.Index) !void {
+fn airSelect(self: *Self, inst: Air.Inst.Index) InnerError!void {
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = self.air.extraData(Air.Bin, pl_op.payload).data;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airSelect for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ pl_op.operand, extra.lhs, extra.rhs });
}
-fn airShuffle(self: *Self, inst: Air.Inst.Index) !void {
+fn airShuffle(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Shuffle, ty_pl.payload).data;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airShuffle for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ extra.a, extra.b, .none });
}
-fn airReduce(self: *Self, inst: Air.Inst.Index) !void {
+fn airReduce(self: *Self, inst: Air.Inst.Index) InnerError!void {
const reduce = self.air.instructions.items(.data)[@intFromEnum(inst)].reduce;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airReduce for aarch64", .{});
return self.finishAir(inst, result, .{ reduce.operand, .none, .none });
}
-fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
+fn airAggregateInit(self: *Self, inst: Air.Inst.Index) InnerError!void {
const pt = self.pt;
const zcu = pt.zcu;
const vector_ty = self.typeOfIndex(inst);
@@ -6090,19 +6077,19 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
return bt.finishAir(result);
}
-fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void {
+fn airUnionInit(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.UnionInit, ty_pl.payload).data;
_ = extra;
return self.fail("TODO implement airUnionInit for aarch64", .{});
}
-fn airPrefetch(self: *Self, inst: Air.Inst.Index) !void {
+fn airPrefetch(self: *Self, inst: Air.Inst.Index) InnerError!void {
const prefetch = self.air.instructions.items(.data)[@intFromEnum(inst)].prefetch;
return self.finishAir(inst, MCValue.dead, .{ prefetch.ptr, .none, .none });
}
-fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void {
+fn airMulAdd(self: *Self, inst: Air.Inst.Index) InnerError!void {
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = self.air.extraData(Air.Bin, pl_op.payload).data;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else {
@@ -6111,7 +6098,7 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, pl_op.operand });
}
-fn airTry(self: *Self, inst: Air.Inst.Index) !void {
+fn airTry(self: *Self, inst: Air.Inst.Index) InnerError!void {
const pt = self.pt;
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = self.air.extraData(Air.Try, pl_op.payload);
@@ -6139,7 +6126,7 @@ fn airTry(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ pl_op.operand, .none, .none });
}
-fn airTryPtr(self: *Self, inst: Air.Inst.Index) !void {
+fn airTryPtr(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.TryPtr, ty_pl.payload);
const body = self.air.extra[extra.end..][0..extra.data.body_len];
@@ -6191,10 +6178,7 @@ fn genTypedValue(self: *Self, val: Value) InnerError!MCValue {
.load_direct => |sym_index| .{ .linker_load = .{ .type = .direct, .sym_index = sym_index } },
.load_symbol, .load_tlv, .lea_symbol, .lea_direct => unreachable, // TODO
},
- .fail => |msg| {
- self.err_msg = msg;
- return error.CodegenFail;
- },
+ .fail => |msg| return self.failMsg(msg),
};
return mcv;
}
@@ -6355,18 +6339,14 @@ fn wantSafety(self: *Self) bool {
};
}
-fn fail(self: *Self, comptime format: []const u8, args: anytype) InnerError {
+fn fail(self: *Self, comptime format: []const u8, args: anytype) error{ OutOfMemory, CodegenFail } {
@branchHint(.cold);
- assert(self.err_msg == null);
- self.err_msg = try ErrorMsg.create(self.gpa, self.src_loc, format, args);
- return error.CodegenFail;
+ return self.pt.zcu.codegenFail(self.owner_nav, format, args);
}
-fn failSymbol(self: *Self, comptime format: []const u8, args: anytype) InnerError {
+fn failMsg(self: *Self, msg: *ErrorMsg) error{ OutOfMemory, CodegenFail } {
@branchHint(.cold);
- assert(self.err_msg == null);
- self.err_msg = try ErrorMsg.create(self.gpa, self.src_loc, format, args);
- return error.CodegenFail;
+ return self.pt.zcu.codegenFailMsg(self.owner_nav, msg);
}
fn parseRegName(name: []const u8) ?Register {
diff --git a/src/arch/aarch64/Emit.zig b/src/arch/aarch64/Emit.zig
index e053b42f41..f76732125b 100644
--- a/src/arch/aarch64/Emit.zig
+++ b/src/arch/aarch64/Emit.zig
@@ -20,7 +20,7 @@ debug_output: link.File.DebugInfoOutput,
target: *const std.Target,
err_msg: ?*ErrorMsg = null,
src_loc: Zcu.LazySrcLoc,
-code: *std.ArrayList(u8),
+code: *std.ArrayListUnmanaged(u8),
prev_di_line: u32,
prev_di_column: u32,
@@ -424,8 +424,10 @@ fn lowerBranches(emit: *Emit) !void {
}
fn writeInstruction(emit: *Emit, instruction: Instruction) !void {
+ const comp = emit.bin_file.comp;
+ const gpa = comp.gpa;
const endian = emit.target.cpu.arch.endian();
- std.mem.writeInt(u32, try emit.code.addManyAsArray(4), instruction.toU32(), endian);
+ std.mem.writeInt(u32, try emit.code.addManyAsArray(gpa, 4), instruction.toU32(), endian);
}
fn fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError {
diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig
index 065f4a047d..65a202803d 100644
--- a/src/arch/arm/CodeGen.zig
+++ b/src/arch/arm/CodeGen.zig
@@ -23,7 +23,6 @@ const log = std.log.scoped(.codegen);
const build_options = @import("build_options");
const Alignment = InternPool.Alignment;
-const Result = codegen.Result;
const CodeGenError = codegen.CodeGenError;
const bits = @import("bits.zig");
@@ -333,9 +332,9 @@ pub fn generate(
func_index: InternPool.Index,
air: Air,
liveness: Liveness,
- code: *std.ArrayList(u8),
+ code: *std.ArrayListUnmanaged(u8),
debug_output: link.File.DebugInfoOutput,
-) CodeGenError!Result {
+) CodeGenError!void {
const zcu = pt.zcu;
const gpa = zcu.gpa;
const func = zcu.funcInfo(func_index);
@@ -377,10 +376,7 @@ pub fn generate(
defer function.dbg_info_relocs.deinit(gpa);
var call_info = function.resolveCallingConventionValues(func_ty) catch |err| switch (err) {
- error.CodegenFail => return Result{ .fail = function.err_msg.? },
- error.OutOfRegisters => return Result{
- .fail = try ErrorMsg.create(gpa, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}),
- },
+ error.CodegenFail => return error.CodegenFail,
else => |e| return e,
};
defer call_info.deinit(&function);
@@ -391,15 +387,14 @@ pub fn generate(
function.max_end_stack = call_info.stack_byte_count;
function.gen() catch |err| switch (err) {
- error.CodegenFail => return Result{ .fail = function.err_msg.? },
- error.OutOfRegisters => return Result{
- .fail = try ErrorMsg.create(gpa, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}),
- },
+ error.CodegenFail => return error.CodegenFail,
+ error.OutOfRegisters => return function.fail("ran out of registers (Zig compiler bug)", .{}),
else => |e| return e,
};
for (function.dbg_info_relocs.items) |reloc| {
- try reloc.genDbgInfo(function);
+ reloc.genDbgInfo(function) catch |err|
+ return function.fail("failed to generate debug info: {s}", .{@errorName(err)});
}
var mir = Mir{
@@ -424,15 +419,9 @@ pub fn generate(
defer emit.deinit();
emit.emitMir() catch |err| switch (err) {
- error.EmitFail => return Result{ .fail = emit.err_msg.? },
+ error.EmitFail => return function.failMsg(emit.err_msg.?),
else => |e| return e,
};
-
- if (function.err_msg) |em| {
- return Result{ .fail = em };
- } else {
- return Result.ok;
- }
}
fn addInst(self: *Self, inst: Mir.Inst) error{OutOfMemory}!Mir.Inst.Index {
@@ -6310,20 +6299,19 @@ fn wantSafety(self: *Self) bool {
};
}
-fn fail(self: *Self, comptime format: []const u8, args: anytype) InnerError {
+fn fail(self: *Self, comptime format: []const u8, args: anytype) error{ OutOfMemory, CodegenFail } {
@branchHint(.cold);
- assert(self.err_msg == null);
- const gpa = self.gpa;
- self.err_msg = try ErrorMsg.create(gpa, self.src_loc, format, args);
- return error.CodegenFail;
+ const zcu = self.pt.zcu;
+ const func = zcu.funcInfo(self.func_index);
+ const msg = try ErrorMsg.create(zcu.gpa, self.src_loc, format, args);
+ return zcu.codegenFailMsg(func.owner_nav, msg);
}
-fn failSymbol(self: *Self, comptime format: []const u8, args: anytype) InnerError {
+fn failMsg(self: *Self, msg: *ErrorMsg) error{ OutOfMemory, CodegenFail } {
@branchHint(.cold);
- assert(self.err_msg == null);
- const gpa = self.gpa;
- self.err_msg = try ErrorMsg.create(gpa, self.src_loc, format, args);
- return error.CodegenFail;
+ const zcu = self.pt.zcu;
+ const func = zcu.funcInfo(self.func_index);
+ return zcu.codegenFailMsg(func.owner_nav, msg);
}
fn parseRegName(name: []const u8) ?Register {
diff --git a/src/arch/arm/Emit.zig b/src/arch/arm/Emit.zig
index 03940dfc3c..4ec6d3867a 100644
--- a/src/arch/arm/Emit.zig
+++ b/src/arch/arm/Emit.zig
@@ -24,7 +24,7 @@ debug_output: link.File.DebugInfoOutput,
target: *const std.Target,
err_msg: ?*ErrorMsg = null,
src_loc: Zcu.LazySrcLoc,
-code: *std.ArrayList(u8),
+code: *std.ArrayListUnmanaged(u8),
prev_di_line: u32,
prev_di_column: u32,
@@ -342,8 +342,10 @@ fn lowerBranches(emit: *Emit) !void {
}
fn writeInstruction(emit: *Emit, instruction: Instruction) !void {
+ const comp = emit.bin_file.comp;
+ const gpa = comp.gpa;
const endian = emit.target.cpu.arch.endian();
- std.mem.writeInt(u32, try emit.code.addManyAsArray(4), instruction.toU32(), endian);
+ std.mem.writeInt(u32, try emit.code.addManyAsArray(gpa, 4), instruction.toU32(), endian);
}
fn fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError {
diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig
index a836d02d71..820188e188 100644
--- a/src/arch/riscv64/CodeGen.zig
+++ b/src/arch/riscv64/CodeGen.zig
@@ -32,7 +32,6 @@ const wip_mir_log = std.log.scoped(.wip_mir);
const Alignment = InternPool.Alignment;
const CodeGenError = codegen.CodeGenError;
-const Result = codegen.Result;
const bits = @import("bits.zig");
const abi = @import("abi.zig");
@@ -62,7 +61,6 @@ gpa: Allocator,
mod: *Package.Module,
target: *const std.Target,
debug_output: link.File.DebugInfoOutput,
-err_msg: ?*ErrorMsg,
args: []MCValue,
ret_mcv: InstTracking,
fn_type: Type,
@@ -759,9 +757,9 @@ pub fn generate(
func_index: InternPool.Index,
air: Air,
liveness: Liveness,
- code: *std.ArrayList(u8),
+ code: *std.ArrayListUnmanaged(u8),
debug_output: link.File.DebugInfoOutput,
-) CodeGenError!Result {
+) CodeGenError!void {
const zcu = pt.zcu;
const comp = zcu.comp;
const gpa = zcu.gpa;
@@ -788,7 +786,6 @@ pub fn generate(
.target = &mod.resolved_target.result,
.debug_output = debug_output,
.owner = .{ .nav_index = func.owner_nav },
- .err_msg = null,
.args = undefined, // populated after `resolveCallingConventionValues`
.ret_mcv = undefined, // populated after `resolveCallingConventionValues`
.fn_type = fn_type,
@@ -829,10 +826,7 @@ pub fn generate(
const fn_info = zcu.typeToFunc(fn_type).?;
var call_info = function.resolveCallingConventionValues(fn_info, &.{}) catch |err| switch (err) {
- error.CodegenFail => return Result{ .fail = function.err_msg.? },
- error.OutOfRegisters => return Result{
- .fail = try ErrorMsg.create(gpa, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}),
- },
+ error.CodegenFail => return error.CodegenFail,
else => |e| return e,
};
@@ -861,10 +855,8 @@ pub fn generate(
}));
function.gen() catch |err| switch (err) {
- error.CodegenFail => return Result{ .fail = function.err_msg.? },
- error.OutOfRegisters => return Result{
- .fail = try ErrorMsg.create(gpa, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}),
- },
+ error.CodegenFail => return error.CodegenFail,
+ error.OutOfRegisters => return function.fail("ran out of registers (Zig compiler bug)", .{}),
else => |e| return e,
};
@@ -895,28 +887,10 @@ pub fn generate(
defer emit.deinit();
emit.emitMir() catch |err| switch (err) {
- error.LowerFail, error.EmitFail => return Result{ .fail = emit.lower.err_msg.? },
- error.InvalidInstruction => |e| {
- const msg = switch (e) {
- error.InvalidInstruction => "CodeGen failed to find a viable instruction.",
- };
- return Result{
- .fail = try ErrorMsg.create(
- gpa,
- src_loc,
- "{s} This is a bug in the Zig compiler.",
- .{msg},
- ),
- };
- },
+ error.LowerFail, error.EmitFail => return function.failMsg(emit.lower.err_msg.?),
+ error.InvalidInstruction => |e| return function.fail("emit MIR failed: {s} (Zig compiler bug)", .{@errorName(e)}),
else => |e| return e,
};
-
- if (function.err_msg) |em| {
- return Result{ .fail = em };
- } else {
- return Result.ok;
- }
}
pub fn generateLazy(
@@ -924,9 +898,9 @@ pub fn generateLazy(
pt: Zcu.PerThread,
src_loc: Zcu.LazySrcLoc,
lazy_sym: link.File.LazySymbol,
- code: *std.ArrayList(u8),
+ code: *std.ArrayListUnmanaged(u8),
debug_output: link.File.DebugInfoOutput,
-) CodeGenError!Result {
+) CodeGenError!void {
const comp = bin_file.comp;
const gpa = comp.gpa;
const mod = comp.root_mod;
@@ -941,7 +915,6 @@ pub fn generateLazy(
.target = &mod.resolved_target.result,
.debug_output = debug_output,
.owner = .{ .lazy_sym = lazy_sym },
- .err_msg = null,
.args = undefined, // populated after `resolveCallingConventionValues`
.ret_mcv = undefined, // populated after `resolveCallingConventionValues`
.fn_type = undefined,
@@ -957,10 +930,8 @@ pub fn generateLazy(
defer function.mir_instructions.deinit(gpa);
function.genLazy(lazy_sym) catch |err| switch (err) {
- error.CodegenFail => return Result{ .fail = function.err_msg.? },
- error.OutOfRegisters => return Result{
- .fail = try ErrorMsg.create(gpa, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}),
- },
+ error.CodegenFail => return error.CodegenFail,
+ error.OutOfRegisters => return function.fail("ran out of registers (Zig compiler bug)", .{}),
else => |e| return e,
};
@@ -991,28 +962,10 @@ pub fn generateLazy(
defer emit.deinit();
emit.emitMir() catch |err| switch (err) {
- error.LowerFail, error.EmitFail => return Result{ .fail = emit.lower.err_msg.? },
- error.InvalidInstruction => |e| {
- const msg = switch (e) {
- error.InvalidInstruction => "CodeGen failed to find a viable instruction.",
- };
- return Result{
- .fail = try ErrorMsg.create(
- gpa,
- src_loc,
- "{s} This is a bug in the Zig compiler.",
- .{msg},
- ),
- };
- },
+ error.LowerFail, error.EmitFail => return function.failMsg(emit.lower.err_msg.?),
+ error.InvalidInstruction => |e| return function.fail("emit MIR failed: {s} (Zig compiler bug)", .{@errorName(e)}),
else => |e| return e,
};
-
- if (function.err_msg) |em| {
- return Result{ .fail = em };
- } else {
- return Result.ok;
- }
}
const FormatWipMirData = struct {
@@ -4758,19 +4711,19 @@ fn airFieldParentPtr(func: *Func, inst: Air.Inst.Index) !void {
return func.fail("TODO implement codegen airFieldParentPtr", .{});
}
-fn genArgDbgInfo(func: Func, inst: Air.Inst.Index, mcv: MCValue) !void {
+fn genArgDbgInfo(func: *const Func, inst: Air.Inst.Index, mcv: MCValue) InnerError!void {
const arg = func.air.instructions.items(.data)[@intFromEnum(inst)].arg;
const ty = arg.ty.toType();
if (arg.name == .none) return;
switch (func.debug_output) {
.dwarf => |dw| switch (mcv) {
- .register => |reg| try dw.genLocalDebugInfo(
+ .register => |reg| dw.genLocalDebugInfo(
.local_arg,
arg.name.toSlice(func.air),
ty,
.{ .reg = reg.dwarfNum() },
- ),
+ ) catch |err| return func.fail("failed to generate debug info: {s}", .{@errorName(err)}),
.load_frame => {},
else => {},
},
@@ -4779,7 +4732,7 @@ fn genArgDbgInfo(func: Func, inst: Air.Inst.Index, mcv: MCValue) !void {
}
}
-fn airArg(func: *Func, inst: Air.Inst.Index) !void {
+fn airArg(func: *Func, inst: Air.Inst.Index) InnerError!void {
var arg_index = func.arg_index;
// we skip over args that have no bits
@@ -5255,7 +5208,7 @@ fn airDbgInlineBlock(func: *Func, inst: Air.Inst.Index) !void {
try func.lowerBlock(inst, @ptrCast(func.air.extra[extra.end..][0..extra.data.body_len]));
}
-fn airDbgVar(func: *Func, inst: Air.Inst.Index) !void {
+fn airDbgVar(func: *Func, inst: Air.Inst.Index) InnerError!void {
const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const operand = pl_op.operand;
const ty = func.typeOf(operand);
@@ -5263,7 +5216,8 @@ fn airDbgVar(func: *Func, inst: Air.Inst.Index) !void {
const name: Air.NullTerminatedString = @enumFromInt(pl_op.payload);
const tag = func.air.instructions.items(.tag)[@intFromEnum(inst)];
- try func.genVarDbgInfo(tag, ty, mcv, name.toSlice(func.air));
+ func.genVarDbgInfo(tag, ty, mcv, name.toSlice(func.air)) catch |err|
+ return func.fail("failed to generate variable debug info: {s}", .{@errorName(err)});
return func.finishAir(inst, .unreach, .{ operand, .none, .none });
}
@@ -8236,10 +8190,7 @@ fn genTypedValue(func: *Func, val: Value) InnerError!MCValue {
return func.fail("TODO: genTypedValue {s}", .{@tagName(mcv)});
},
},
- .fail => |msg| {
- func.err_msg = msg;
- return error.CodegenFail;
- },
+ .fail => |msg| return func.failMsg(msg),
};
return mcv;
}
@@ -8427,17 +8378,23 @@ fn wantSafety(func: *Func) bool {
};
}
-fn fail(func: *Func, comptime format: []const u8, args: anytype) InnerError {
+fn fail(func: *const Func, comptime format: []const u8, args: anytype) error{ OutOfMemory, CodegenFail } {
@branchHint(.cold);
- assert(func.err_msg == null);
- func.err_msg = try ErrorMsg.create(func.gpa, func.src_loc, format, args);
+ const zcu = func.pt.zcu;
+ switch (func.owner) {
+ .nav_index => |i| return zcu.codegenFail(i, format, args),
+ .lazy_sym => |s| return zcu.codegenFailType(s.ty, format, args),
+ }
return error.CodegenFail;
}
-fn failSymbol(func: *Func, comptime format: []const u8, args: anytype) InnerError {
+fn failMsg(func: *const Func, msg: *ErrorMsg) error{ OutOfMemory, CodegenFail } {
@branchHint(.cold);
- assert(func.err_msg == null);
- func.err_msg = try ErrorMsg.create(func.gpa, func.src_loc, format, args);
+ const zcu = func.pt.zcu;
+ switch (func.owner) {
+ .nav_index => |i| return zcu.codegenFailMsg(i, msg),
+ .lazy_sym => |s| return zcu.codegenFailTypeMsg(s.ty, msg),
+ }
return error.CodegenFail;
}
diff --git a/src/arch/riscv64/Emit.zig b/src/arch/riscv64/Emit.zig
index 2c4c04d5d3..095cfc278b 100644
--- a/src/arch/riscv64/Emit.zig
+++ b/src/arch/riscv64/Emit.zig
@@ -3,7 +3,7 @@
bin_file: *link.File,
lower: Lower,
debug_output: link.File.DebugInfoOutput,
-code: *std.ArrayList(u8),
+code: *std.ArrayListUnmanaged(u8),
prev_di_line: u32,
prev_di_column: u32,
@@ -18,6 +18,7 @@ pub const Error = Lower.Error || error{
};
pub fn emitMir(emit: *Emit) Error!void {
+ const gpa = emit.bin_file.comp.gpa;
log.debug("mir instruction len: {}", .{emit.lower.mir.instructions.len});
for (0..emit.lower.mir.instructions.len) |mir_i| {
const mir_index: Mir.Inst.Index = @intCast(mir_i);
@@ -30,7 +31,7 @@ pub fn emitMir(emit: *Emit) Error!void {
var lowered_relocs = lowered.relocs;
for (lowered.insts, 0..) |lowered_inst, lowered_index| {
const start_offset: u32 = @intCast(emit.code.items.len);
- try lowered_inst.encode(emit.code.writer());
+ try lowered_inst.encode(emit.code.writer(gpa));
while (lowered_relocs.len > 0 and
lowered_relocs[0].lowered_inst_index == lowered_index) : ({
@@ -56,13 +57,13 @@ pub fn emitMir(emit: *Emit) Error!void {
const hi_r_type: u32 = @intFromEnum(std.elf.R_RISCV.HI20);
const lo_r_type: u32 = @intFromEnum(std.elf.R_RISCV.LO12_I);
- try atom_ptr.addReloc(elf_file.base.comp.gpa, .{
+ try atom_ptr.addReloc(gpa, .{
.r_offset = start_offset,
.r_info = (@as(u64, @intCast(symbol.sym_index)) << 32) | hi_r_type,
.r_addend = 0,
}, zo);
- try atom_ptr.addReloc(elf_file.base.comp.gpa, .{
+ try atom_ptr.addReloc(gpa, .{
.r_offset = start_offset + 4,
.r_info = (@as(u64, @intCast(symbol.sym_index)) << 32) | lo_r_type,
.r_addend = 0,
@@ -76,19 +77,19 @@ pub fn emitMir(emit: *Emit) Error!void {
const R_RISCV = std.elf.R_RISCV;
- try atom_ptr.addReloc(elf_file.base.comp.gpa, .{
+ try atom_ptr.addReloc(gpa, .{
.r_offset = start_offset,
.r_info = (@as(u64, @intCast(symbol.sym_index)) << 32) | @intFromEnum(R_RISCV.TPREL_HI20),
.r_addend = 0,
}, zo);
- try atom_ptr.addReloc(elf_file.base.comp.gpa, .{
+ try atom_ptr.addReloc(gpa, .{
.r_offset = start_offset + 4,
.r_info = (@as(u64, @intCast(symbol.sym_index)) << 32) | @intFromEnum(R_RISCV.TPREL_ADD),
.r_addend = 0,
}, zo);
- try atom_ptr.addReloc(elf_file.base.comp.gpa, .{
+ try atom_ptr.addReloc(gpa, .{
.r_offset = start_offset + 8,
.r_info = (@as(u64, @intCast(symbol.sym_index)) << 32) | @intFromEnum(R_RISCV.TPREL_LO12_I),
.r_addend = 0,
@@ -101,7 +102,7 @@ pub fn emitMir(emit: *Emit) Error!void {
const r_type: u32 = @intFromEnum(std.elf.R_RISCV.CALL_PLT);
- try atom_ptr.addReloc(elf_file.base.comp.gpa, .{
+ try atom_ptr.addReloc(gpa, .{
.r_offset = start_offset,
.r_info = (@as(u64, @intCast(symbol.sym_index)) << 32) | r_type,
.r_addend = 0,
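The `r_info` values built above follow the ELF64 convention: symbol index in the upper 32 bits, relocation type in the lower 32. A small illustrative helper, not part of the patch:

const std = @import("std");

// ELF64: r_info = (symbol index << 32) | relocation type.
fn elf64RInfo(sym_index: u32, r_type: u32) u64 {
    return (@as(u64, sym_index) << 32) | r_type;
}

test elf64RInfo {
    const call_plt: u32 = @intFromEnum(std.elf.R_RISCV.CALL_PLT);
    try std.testing.expectEqual((@as(u64, 7) << 32) | call_plt, elf64RInfo(7, call_plt));
}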
diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig
index 7bbed29d8f..32bca3bc90 100644
--- a/src/arch/sparc64/CodeGen.zig
+++ b/src/arch/sparc64/CodeGen.zig
@@ -21,7 +21,6 @@ const Emit = @import("Emit.zig");
const Liveness = @import("../../Liveness.zig");
const Type = @import("../../Type.zig");
const CodeGenError = codegen.CodeGenError;
-const Result = @import("../../codegen.zig").Result;
const Endian = std.builtin.Endian;
const Alignment = InternPool.Alignment;
@@ -55,7 +54,7 @@ liveness: Liveness,
bin_file: *link.File,
target: *const std.Target,
func_index: InternPool.Index,
-code: *std.ArrayList(u8),
+code: *std.ArrayListUnmanaged(u8),
debug_output: link.File.DebugInfoOutput,
err_msg: ?*ErrorMsg,
args: []MCValue,
@@ -266,9 +265,9 @@ pub fn generate(
func_index: InternPool.Index,
air: Air,
liveness: Liveness,
- code: *std.ArrayList(u8),
+ code: *std.ArrayListUnmanaged(u8),
debug_output: link.File.DebugInfoOutput,
-) CodeGenError!Result {
+) CodeGenError!void {
const zcu = pt.zcu;
const gpa = zcu.gpa;
const func = zcu.funcInfo(func_index);
@@ -284,7 +283,7 @@ pub fn generate(
}
try branch_stack.append(.{});
- var function = Self{
+ var function: Self = .{
.gpa = gpa,
.pt = pt,
.air = air,
@@ -310,10 +309,7 @@ pub fn generate(
defer function.exitlude_jump_relocs.deinit(gpa);
var call_info = function.resolveCallingConventionValues(func_ty, .callee) catch |err| switch (err) {
- error.CodegenFail => return Result{ .fail = function.err_msg.? },
- error.OutOfRegisters => return Result{
- .fail = try ErrorMsg.create(gpa, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}),
- },
+ error.CodegenFail => return error.CodegenFail,
else => |e| return e,
};
defer call_info.deinit(&function);
@@ -324,10 +320,8 @@ pub fn generate(
function.max_end_stack = call_info.stack_byte_count;
function.gen() catch |err| switch (err) {
- error.CodegenFail => return Result{ .fail = function.err_msg.? },
- error.OutOfRegisters => return Result{
- .fail = try ErrorMsg.create(gpa, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}),
- },
+ error.CodegenFail => return error.CodegenFail,
+ error.OutOfRegisters => return function.fail("ran out of registers (Zig compiler bug)", .{}),
else => |e| return e,
};
@@ -337,7 +331,7 @@ pub fn generate(
};
defer mir.deinit(gpa);
- var emit = Emit{
+ var emit: Emit = .{
.mir = mir,
.bin_file = lf,
.debug_output = debug_output,
@@ -351,15 +345,9 @@ pub fn generate(
defer emit.deinit();
emit.emitMir() catch |err| switch (err) {
- error.EmitFail => return Result{ .fail = emit.err_msg.? },
+ error.EmitFail => return function.failMsg(emit.err_msg.?),
else => |e| return e,
};
-
- if (function.err_msg) |em| {
- return Result{ .fail = em };
- } else {
- return Result.ok;
- }
}
fn gen(self: *Self) !void {
@@ -1014,7 +1002,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
return bt.finishAir(result);
}
-fn airArg(self: *Self, inst: Air.Inst.Index) !void {
+fn airArg(self: *Self, inst: Air.Inst.Index) InnerError!void {
const pt = self.pt;
const zcu = pt.zcu;
const arg_index = self.arg_index;
@@ -1036,7 +1024,8 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
}
};
- try self.genArgDbgInfo(inst, mcv);
+ self.genArgDbgInfo(inst, mcv) catch |err|
+ return self.fail("failed to generate debug info for parameter: {s}", .{@errorName(err)});
if (self.liveness.isUnused(inst))
return self.finishAirBookkeeping();
@@ -3511,12 +3500,19 @@ fn errUnionPayload(self: *Self, error_union_mcv: MCValue, error_union_ty: Type)
}
}
-fn fail(self: *Self, comptime format: []const u8, args: anytype) InnerError {
+fn fail(self: *Self, comptime format: []const u8, args: anytype) error{ OutOfMemory, CodegenFail } {
@branchHint(.cold);
- assert(self.err_msg == null);
- const gpa = self.gpa;
- self.err_msg = try ErrorMsg.create(gpa, self.src_loc, format, args);
- return error.CodegenFail;
+ const zcu = self.pt.zcu;
+ const func = zcu.funcInfo(self.func_index);
+ const msg = try ErrorMsg.create(zcu.gpa, self.src_loc, format, args);
+ return zcu.codegenFailMsg(func.owner_nav, msg);
+}
+
+fn failMsg(self: *Self, msg: *ErrorMsg) error{ OutOfMemory, CodegenFail } {
+ @branchHint(.cold);
+ const zcu = self.pt.zcu;
+ const func = zcu.funcInfo(self.func_index);
+ return zcu.codegenFailMsg(func.owner_nav, msg);
}
/// Called when there are no operands, and the instruction is always unreferenced.
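The `var function: Self = .{ ... }` and `var emit: Emit = .{ ... }` changes above (and `.big`, `.empty` elsewhere in this commit) lean on result-location typing: the type is spelled once on the left and the right-hand side is an anonymous literal. A tiny equivalence check, using a made-up `Point` type:

const std = @import("std");

const Point = struct { x: i32 = 0, y: i32 = 0 };

test "result-location initialization is equivalent" {
    const a = Point{ .x = 1 };
    const b: Point = .{ .x = 1 };
    try std.testing.expectEqual(a, b);
}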
diff --git a/src/arch/sparc64/Emit.zig b/src/arch/sparc64/Emit.zig
index ca50aa50c6..74537f0231 100644
--- a/src/arch/sparc64/Emit.zig
+++ b/src/arch/sparc64/Emit.zig
@@ -22,7 +22,7 @@ debug_output: link.File.DebugInfoOutput,
target: *const std.Target,
err_msg: ?*ErrorMsg = null,
src_loc: Zcu.LazySrcLoc,
-code: *std.ArrayList(u8),
+code: *std.ArrayListUnmanaged(u8),
prev_di_line: u32,
prev_di_column: u32,
@@ -678,10 +678,13 @@ fn optimalBranchType(emit: *Emit, tag: Mir.Inst.Tag, offset: i64) !BranchType {
}
fn writeInstruction(emit: *Emit, instruction: Instruction) !void {
+ const comp = emit.bin_file.comp;
+ const gpa = comp.gpa;
+
// SPARCv9 instructions are always arranged in BE regardless of the
// endianness mode the CPU is running in (Section 3.1 of the ISA specification).
// This is to ease porting in case someone wants to do a LE SPARCv9 backend.
- const endian = Endian.big;
+ const endian: Endian = .big;
- std.mem.writeInt(u32, try emit.code.addManyAsArray(4), instruction.toU32(), endian);
+ std.mem.writeInt(u32, try emit.code.addManyAsArray(gpa, 4), instruction.toU32(), endian);
}
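As the comment above says, SPARCv9 instruction words are stored big-endian regardless of the host, which is what `writeInt` with `.big` guarantees. A quick check of that convention:

const std = @import("std");

test "instruction words are emitted big-endian" {
    var buf: [4]u8 = undefined;
    // 0x01000000 is the SPARC `nop` encoding (sethi 0, %g0).
    std.mem.writeInt(u32, &buf, 0x01000000, .big);
    try std.testing.expectEqualSlices(u8, &.{ 0x01, 0x00, 0x00, 0x00 }, &buf);
}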
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index 49961042bc..eeaf9988cb 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -1,14 +1,13 @@
const std = @import("std");
const builtin = @import("builtin");
const Allocator = std.mem.Allocator;
-const ArrayList = std.ArrayList;
const assert = std.debug.assert;
const testing = std.testing;
const leb = std.leb;
const mem = std.mem;
-const wasm = std.wasm;
const log = std.log.scoped(.codegen);
+const CodeGen = @This();
const codegen = @import("../../codegen.zig");
const Zcu = @import("../../Zcu.zig");
const InternPool = @import("../../InternPool.zig");
@@ -19,13 +18,113 @@ const Compilation = @import("../../Compilation.zig");
const link = @import("../../link.zig");
const Air = @import("../../Air.zig");
const Liveness = @import("../../Liveness.zig");
-const target_util = @import("../../target.zig");
const Mir = @import("Mir.zig");
const Emit = @import("Emit.zig");
const abi = @import("abi.zig");
const Alignment = InternPool.Alignment;
const errUnionPayloadOffset = codegen.errUnionPayloadOffset;
const errUnionErrorOffset = codegen.errUnionErrorOffset;
+const Wasm = link.File.Wasm;
+
+const target_util = @import("../../target.zig");
+const libcFloatPrefix = target_util.libcFloatPrefix;
+const libcFloatSuffix = target_util.libcFloatSuffix;
+const compilerRtFloatAbbrev = target_util.compilerRtFloatAbbrev;
+const compilerRtIntAbbrev = target_util.compilerRtIntAbbrev;
+
+/// Reference to the function declaration the code
+/// section belongs to
+owner_nav: InternPool.Nav.Index,
+/// Current block depth. Used to calculate the relative difference between a break
+/// and block
+block_depth: u32 = 0,
+air: Air,
+liveness: Liveness,
+gpa: mem.Allocator,
+func_index: InternPool.Index,
+/// Contains a list of current branches.
+/// When we return from a branch, the branch will be popped from this list,
+/// which means branches can only contain references from within its own branch,
+/// or a branch higher (lower index) in the tree.
+branches: std.ArrayListUnmanaged(Branch) = .empty,
+/// Table to save `WValue`'s generated by an `Air.Inst`
+// values: ValueTable,
+/// Mapping from Air.Inst.Index to block ids
+blocks: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, struct {
+ label: u32,
+ value: WValue,
+}) = .{},
+/// Maps `loop` instructions to their label. `br` to here repeats the loop.
+loops: std.AutoHashMapUnmanaged(Air.Inst.Index, u32) = .empty,
+/// The index the next local generated will have
+/// NOTE: arguments share the index with locals therefore the first variable
+/// will have the index that comes after the last argument's index
+local_index: u32,
+/// The index of the current argument.
+/// Used to track which argument is being referenced in `airArg`.
+arg_index: u32 = 0,
+/// List of simd128 immediates. Each value is stored as an array of bytes.
+/// This list will only be populated for 128bit-simd values when the target features
+/// are enabled also.
+simd_immediates: std.ArrayListUnmanaged([16]u8) = .empty,
+/// The Target we're emitting (used to call intInfo)
+target: *const std.Target,
+ptr_size: enum { wasm32, wasm64 },
+wasm: *link.File.Wasm,
+pt: Zcu.PerThread,
+/// List of MIR Instructions
+mir_instructions: *std.MultiArrayList(Mir.Inst),
+/// Contains extra data for MIR
+mir_extra: *std.ArrayListUnmanaged(u32),
+start_mir_extra_off: u32,
+start_locals_off: u32,
+/// List of all locals' types generated throughout this declaration
+/// used to emit locals count at start of 'code' section.
+locals: *std.ArrayListUnmanaged(std.wasm.Valtype),
+/// When a function is executing, we store the current stack pointer's value within this local.
+/// This value is then used to restore the stack pointer to the original value at the return of the function.
+initial_stack_value: WValue = .none,
+/// The current stack pointer subtracted with the stack size. From this value, we will calculate
+/// all offsets of the stack values.
+bottom_stack_value: WValue = .none,
+/// Arguments of this function declaration
+/// This will be set after `resolveCallingConventionValues`
+args: []WValue,
+/// This will only be `.none` if the function returns void, or returns an immediate.
+/// When it returns a pointer to the stack, the `.local` tag will be active and must be populated
+/// before this function returns its execution to the caller.
+return_value: WValue,
+/// The size of the stack this function occupies. In the function prologue
+/// we will move the stack pointer by this number, forward aligned with the `stack_alignment`.
+stack_size: u32 = 0,
+/// The stack alignment, which is 16 bytes by default. This is specified by the
+/// tool-conventions: https://github.com/WebAssembly/tool-conventions/blob/main/BasicCABI.md
+/// and also what the llvm backend will emit.
+/// However, local variables or the usage of `incoming_stack_alignment` in a `CallingConvention` can overwrite this default.
+stack_alignment: Alignment = .@"16",
+
+// For each individual Wasm valtype we store a separate free list which
+// allows us to re-use locals that are no longer used. e.g. a temporary local.
+/// A list of indexes which represents a local of valtype `i32`.
+/// It is illegal to store a non-i32 valtype in this list.
+free_locals_i32: std.ArrayListUnmanaged(u32) = .empty,
+/// A list of indexes which represents a local of valtype `i64`.
+/// It is illegal to store a non-i64 valtype in this list.
+free_locals_i64: std.ArrayListUnmanaged(u32) = .empty,
+/// A list of indexes which represents a local of valtype `f32`.
+/// It is illegal to store a non-f32 valtype in this list.
+free_locals_f32: std.ArrayListUnmanaged(u32) = .empty,
+/// A list of indexes which represents a local of valtype `f64`.
+/// It is illegal to store a non-f64 valtype in this list.
+free_locals_f64: std.ArrayListUnmanaged(u32) = .empty,
+/// A list of indexes which represents a local of valtype `v128`.
+/// It is illegal to store a non-v128 valtype in this list.
+free_locals_v128: std.ArrayListUnmanaged(u32) = .empty,
+
+/// When in debug mode, this tracks if no `finishAir` was missed.
+/// Forgetting to call `finishAir` will cause the result to not be
+/// stored in our `values` map and therefore cause bugs.
+air_bookkeeping: @TypeOf(bookkeeping_init) = bookkeeping_init,
/// Wasm Value, created when generating an instruction
const WValue = union(enum) {
@@ -55,22 +154,15 @@ const WValue = union(enum) {
float32: f32,
/// A constant 64bit float value
float64: f64,
- /// A value that represents a pointer to the data section
- /// Note: The value contains the symbol index, rather than the actual address
- /// as we use this to perform the relocation.
- memory: u32,
- /// A value that represents a parent pointer and an offset
- /// from that pointer. i.e. when slicing with constant values.
- memory_offset: struct {
- /// The symbol of the parent pointer
- pointer: u32,
- /// Offset will be set as addend when relocating
- offset: u32,
+ nav_ref: struct {
+ nav_index: InternPool.Nav.Index,
+ offset: i32 = 0,
+ },
+ uav_ref: struct {
+ ip_index: InternPool.Index,
+ offset: i32 = 0,
+ orig_ptr_ty: InternPool.Index = .none,
},
- /// Represents a function pointer
- /// In wasm function pointers are indexes into a function table,
- /// rather than an address in the data section.
- function_index: u32,
/// Offset from the bottom of the virtual stack, with the offset
/// pointing to where the value lives.
stack_offset: struct {
@@ -101,7 +193,7 @@ const WValue = union(enum) {
switch (value) {
.stack => {
const new_local = try gen.allocLocal(ty);
- try gen.addLabel(.local_set, new_local.local.value);
+ try gen.addLocal(.local_set, new_local.local.value);
return new_local;
},
.local, .stack_offset => return value,
@@ -119,7 +211,7 @@ const WValue = union(enum) {
if (local_value < reserved + 2) return; // reserved locals may never be re-used. Also accounts for 2 stack locals.
const index = local_value - reserved;
- const valtype = @as(wasm.Valtype, @enumFromInt(gen.locals.items[index]));
+ const valtype = gen.locals.items[gen.start_locals_off + index];
switch (valtype) {
.i32 => gen.free_locals_i32.append(gen.gpa, local_value) catch return, // It's ok to fail any of those, a new local can be allocated instead
.i64 => gen.free_locals_i64.append(gen.gpa, local_value) catch return,
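The per-valtype free lists declared in the new header fields work the way this switch suggests: a dead local is pushed onto the list matching its valtype so `allocLocal` can reuse it instead of growing `locals`. A hypothetical helper that spells out the mapping (`freeListFor` does not exist in the patch; the real code dispatches inline):

fn freeListFor(cg: *CodeGen, valtype: std.wasm.Valtype) *std.ArrayListUnmanaged(u32) {
    // One recycling list per Wasm value type; locals of different
    // types are never interchangeable.
    return switch (valtype) {
        .i32 => &cg.free_locals_i32,
        .i64 => &cg.free_locals_i64,
        .f32 => &cg.free_locals_f32,
        .f64 => &cg.free_locals_f64,
        .v128 => &cg.free_locals_v128,
    };
}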
@@ -132,8 +224,6 @@ const WValue = union(enum) {
}
};
-/// Wasm ops, but without input/output/signedness information
-/// Used for `buildOpcode`
const Op = enum {
@"unreachable",
nop,
@@ -147,12 +237,8 @@ const Op = enum {
br_table,
@"return",
call,
- call_indirect,
drop,
select,
- local_get,
- local_set,
- local_tee,
global_get,
global_set,
load,
@@ -200,70 +286,38 @@ const Op = enum {
extend,
};
-/// Contains the settings needed to create an `Opcode` using `buildOpcode`.
-///
-/// The fields correspond to the opcode name. Here is an example
-/// i32_trunc_f32_s
-/// ^ ^ ^ ^
-/// | | | |
-/// valtype1 | | |
-/// = .i32 | | |
-/// | | |
-/// op | |
-/// = .trunc | |
-/// | |
-/// valtype2 |
-/// = .f32 |
-/// |
-/// width |
-/// = null |
-/// |
-/// signed
-/// = true
-///
-/// There can be missing fields, here are some more examples:
-/// i64_load8_u
-/// --> .{ .valtype1 = .i64, .op = .load, .width = 8, signed = false }
-/// i32_mul
-/// --> .{ .valtype1 = .i32, .op = .trunc }
-/// nop
-/// --> .{ .op = .nop }
const OpcodeBuildArguments = struct {
/// First valtype in the opcode (usually represents the type of the output)
- valtype1: ?wasm.Valtype = null,
+ valtype1: ?std.wasm.Valtype = null,
/// The operation (e.g. call, unreachable, div, min, sqrt, etc.)
op: Op,
/// Width of the operation (e.g. 8 for i32_load8_s, 16 for i64_extend16_i32_s)
width: ?u8 = null,
/// Second valtype in the opcode name (usually represents the type of the input)
- valtype2: ?wasm.Valtype = null,
+ valtype2: ?std.wasm.Valtype = null,
/// Signedness of the op
signedness: ?std.builtin.Signedness = null,
};
-/// Helper function that builds an Opcode given the arguments needed
-fn buildOpcode(args: OpcodeBuildArguments) wasm.Opcode {
+/// TODO: deprecated, should be split up per tag.
+fn buildOpcode(args: OpcodeBuildArguments) std.wasm.Opcode {
switch (args.op) {
- .@"unreachable" => return .@"unreachable",
- .nop => return .nop,
- .block => return .block,
- .loop => return .loop,
- .@"if" => return .@"if",
- .@"else" => return .@"else",
- .end => return .end,
- .br => return .br,
- .br_if => return .br_if,
- .br_table => return .br_table,
- .@"return" => return .@"return",
- .call => return .call,
- .call_indirect => return .call_indirect,
- .drop => return .drop,
- .select => return .select,
- .local_get => return .local_get,
- .local_set => return .local_set,
- .local_tee => return .local_tee,
- .global_get => return .global_get,
- .global_set => return .global_set,
+ .@"unreachable" => unreachable,
+ .nop => unreachable,
+ .block => unreachable,
+ .loop => unreachable,
+ .@"if" => unreachable,
+ .@"else" => unreachable,
+ .end => unreachable,
+ .br => unreachable,
+ .br_if => unreachable,
+ .br_table => unreachable,
+ .@"return" => unreachable,
+ .call => unreachable,
+ .drop => unreachable,
+ .select => unreachable,
+ .global_get => unreachable,
+ .global_set => unreachable,
.load => if (args.width) |width| switch (width) {
8 => switch (args.valtype1.?) {
@@ -621,121 +675,17 @@ fn buildOpcode(args: OpcodeBuildArguments) wasm.Opcode {
test "Wasm - buildOpcode" {
// Make sure buildOpcode is referenced, and test some examples
const i32_const = buildOpcode(.{ .op = .@"const", .valtype1 = .i32 });
- const end = buildOpcode(.{ .op = .end });
- const local_get = buildOpcode(.{ .op = .local_get });
const i64_extend32_s = buildOpcode(.{ .op = .extend, .valtype1 = .i64, .width = 32, .signedness = .signed });
const f64_reinterpret_i64 = buildOpcode(.{ .op = .reinterpret, .valtype1 = .f64, .valtype2 = .i64 });
- try testing.expectEqual(@as(wasm.Opcode, .i32_const), i32_const);
- try testing.expectEqual(@as(wasm.Opcode, .end), end);
- try testing.expectEqual(@as(wasm.Opcode, .local_get), local_get);
- try testing.expectEqual(@as(wasm.Opcode, .i64_extend32_s), i64_extend32_s);
- try testing.expectEqual(@as(wasm.Opcode, .f64_reinterpret_i64), f64_reinterpret_i64);
+ try testing.expectEqual(@as(std.wasm.Opcode, .i32_const), i32_const);
+ try testing.expectEqual(@as(std.wasm.Opcode, .i64_extend32_s), i64_extend32_s);
+ try testing.expectEqual(@as(std.wasm.Opcode, .f64_reinterpret_i64), f64_reinterpret_i64);
}
/// Hashmap to store generated `WValue` for each `Air.Inst.Ref`
pub const ValueTable = std.AutoArrayHashMapUnmanaged(Air.Inst.Ref, WValue);
-const CodeGen = @This();
-
-/// Reference to the function declaration the code
-/// section belongs to
-owner_nav: InternPool.Nav.Index,
-src_loc: Zcu.LazySrcLoc,
-/// Current block depth. Used to calculate the relative difference between a break
-/// and block
-block_depth: u32 = 0,
-air: Air,
-liveness: Liveness,
-gpa: mem.Allocator,
-debug_output: link.File.DebugInfoOutput,
-func_index: InternPool.Index,
-/// Contains a list of current branches.
-/// When we return from a branch, the branch will be popped from this list,
-/// which means branches can only contain references from within its own branch,
-/// or a branch higher (lower index) in the tree.
-branches: std.ArrayListUnmanaged(Branch) = .empty,
-/// Table to save `WValue`'s generated by an `Air.Inst`
-// values: ValueTable,
-/// Mapping from Air.Inst.Index to block ids
-blocks: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, struct {
- label: u32,
- value: WValue,
-}) = .{},
-/// Maps `loop` instructions to their label. `br` to here repeats the loop.
-loops: std.AutoHashMapUnmanaged(Air.Inst.Index, u32) = .empty,
-/// `bytes` contains the wasm bytecode belonging to the 'code' section.
-code: *ArrayList(u8),
-/// The index the next local generated will have
-/// NOTE: arguments share the index with locals therefore the first variable
-/// will have the index that comes after the last argument's index
-local_index: u32 = 0,
-/// The index of the current argument.
-/// Used to track which argument is being referenced in `airArg`.
-arg_index: u32 = 0,
-/// If codegen fails, an error messages will be allocated and saved in `err_msg`
-err_msg: *Zcu.ErrorMsg,
-/// List of all locals' types generated throughout this declaration
-/// used to emit locals count at start of 'code' section.
-locals: std.ArrayListUnmanaged(u8),
-/// List of simd128 immediates. Each value is stored as an array of bytes.
-/// This list will only be populated for 128bit-simd values when the target features
-/// are enabled also.
-simd_immediates: std.ArrayListUnmanaged([16]u8) = .empty,
-/// The Target we're emitting (used to call intInfo)
-target: *const std.Target,
-/// Represents the wasm binary file that is being linked.
-bin_file: *link.File.Wasm,
-pt: Zcu.PerThread,
-/// List of MIR Instructions
-mir_instructions: std.MultiArrayList(Mir.Inst) = .{},
-/// Contains extra data for MIR
-mir_extra: std.ArrayListUnmanaged(u32) = .empty,
-/// When a function is executing, we store the the current stack pointer's value within this local.
-/// This value is then used to restore the stack pointer to the original value at the return of the function.
-initial_stack_value: WValue = .none,
-/// The current stack pointer subtracted with the stack size. From this value, we will calculate
-/// all offsets of the stack values.
-bottom_stack_value: WValue = .none,
-/// Arguments of this function declaration
-/// This will be set after `resolveCallingConventionValues`
-args: []WValue = &.{},
-/// This will only be `.none` if the function returns void, or returns an immediate.
-/// When it returns a pointer to the stack, the `.local` tag will be active and must be populated
-/// before this function returns its execution to the caller.
-return_value: WValue = .none,
-/// The size of the stack this function occupies. In the function prologue
-/// we will move the stack pointer by this number, forward aligned with the `stack_alignment`.
-stack_size: u32 = 0,
-/// The stack alignment, which is 16 bytes by default. This is specified by the
-/// tool-conventions: https://github.com/WebAssembly/tool-conventions/blob/main/BasicCABI.md
-/// and also what the llvm backend will emit.
-/// However, local variables or the usage of `incoming_stack_alignment` in a `CallingConvention` can overwrite this default.
-stack_alignment: Alignment = .@"16",
-
-// For each individual Wasm valtype we store a seperate free list which
-// allows us to re-use locals that are no longer used. e.g. a temporary local.
-/// A list of indexes which represents a local of valtype `i32`.
-/// It is illegal to store a non-i32 valtype in this list.
-free_locals_i32: std.ArrayListUnmanaged(u32) = .empty,
-/// A list of indexes which represents a local of valtype `i64`.
-/// It is illegal to store a non-i64 valtype in this list.
-free_locals_i64: std.ArrayListUnmanaged(u32) = .empty,
-/// A list of indexes which represents a local of valtype `f32`.
-/// It is illegal to store a non-f32 valtype in this list.
-free_locals_f32: std.ArrayListUnmanaged(u32) = .empty,
-/// A list of indexes which represents a local of valtype `f64`.
-/// It is illegal to store a non-f64 valtype in this list.
-free_locals_f64: std.ArrayListUnmanaged(u32) = .empty,
-/// A list of indexes which represents a local of valtype `v127`.
-/// It is illegal to store a non-v128 valtype in this list.
-free_locals_v128: std.ArrayListUnmanaged(u32) = .empty,
-
-/// When in debug mode, this tracks if no `finishAir` was missed.
-/// Forgetting to call `finishAir` will cause the result to not be
-/// stored in our `values` map and therefore cause bugs.
-air_bookkeeping: @TypeOf(bookkeeping_init) = bookkeeping_init,
-
const bookkeeping_init = if (std.debug.runtime_safety) @as(usize, 0) else {};
const InnerError = error{
@@ -746,38 +696,33 @@ const InnerError = error{
Overflow,
} || link.File.UpdateDebugInfoError;
-pub fn deinit(func: *CodeGen) void {
- // in case of an error and we still have branches
- for (func.branches.items) |*branch| {
- branch.deinit(func.gpa);
- }
- func.branches.deinit(func.gpa);
- func.blocks.deinit(func.gpa);
- func.loops.deinit(func.gpa);
- func.locals.deinit(func.gpa);
- func.simd_immediates.deinit(func.gpa);
- func.mir_instructions.deinit(func.gpa);
- func.mir_extra.deinit(func.gpa);
- func.free_locals_i32.deinit(func.gpa);
- func.free_locals_i64.deinit(func.gpa);
- func.free_locals_f32.deinit(func.gpa);
- func.free_locals_f64.deinit(func.gpa);
- func.free_locals_v128.deinit(func.gpa);
- func.* = undefined;
+pub fn deinit(cg: *CodeGen) void {
+ const gpa = cg.gpa;
+ for (cg.branches.items) |*branch| branch.deinit(gpa);
+ cg.branches.deinit(gpa);
+ cg.blocks.deinit(gpa);
+ cg.loops.deinit(gpa);
+ cg.simd_immediates.deinit(gpa);
+ cg.free_locals_i32.deinit(gpa);
+ cg.free_locals_i64.deinit(gpa);
+ cg.free_locals_f32.deinit(gpa);
+ cg.free_locals_f64.deinit(gpa);
+ cg.free_locals_v128.deinit(gpa);
+ cg.* = undefined;
}
-/// Sets `err_msg` on `CodeGen` and returns `error.CodegenFail` which is caught in link/Wasm.zig
-fn fail(func: *CodeGen, comptime fmt: []const u8, args: anytype) InnerError {
- func.err_msg = try Zcu.ErrorMsg.create(func.gpa, func.src_loc, fmt, args);
- return error.CodegenFail;
+fn fail(cg: *CodeGen, comptime fmt: []const u8, args: anytype) error{ OutOfMemory, CodegenFail } {
+ const zcu = cg.pt.zcu;
+ const func = zcu.funcInfo(cg.func_index);
+ return zcu.codegenFail(func.owner_nav, fmt, args);
}
/// Resolves the `WValue` for the given instruction `inst`
/// When the given instruction has a `Value`, it returns a constant instead
-fn resolveInst(func: *CodeGen, ref: Air.Inst.Ref) InnerError!WValue {
- var branch_index = func.branches.items.len;
+fn resolveInst(cg: *CodeGen, ref: Air.Inst.Ref) InnerError!WValue {
+ var branch_index = cg.branches.items.len;
while (branch_index > 0) : (branch_index -= 1) {
- const branch = func.branches.items[branch_index - 1];
+ const branch = cg.branches.items[branch_index - 1];
if (branch.values.get(ref)) |value| {
return value;
}
@@ -787,16 +732,16 @@ fn resolveInst(func: *CodeGen, ref: Air.Inst.Ref) InnerError!WValue {
// means we must generate it from a constant.
// We always store constants in the most outer branch as they must never
// be removed. The most outer branch is always at index 0.
- const gop = try func.branches.items[0].values.getOrPut(func.gpa, ref);
+ const gop = try cg.branches.items[0].values.getOrPut(cg.gpa, ref);
assert(!gop.found_existing);
- const pt = func.pt;
+ const pt = cg.pt;
const zcu = pt.zcu;
- const val = (try func.air.value(ref, pt)).?;
- const ty = func.typeOf(ref);
+ const val = (try cg.air.value(ref, pt)).?;
+ const ty = cg.typeOf(ref);
if (!ty.hasRuntimeBitsIgnoreComptime(zcu) and !ty.isInt(zcu) and !ty.isError(zcu)) {
gop.value_ptr.* = .none;
- return gop.value_ptr.*;
+ return .none;
}
// When we need to pass the value by reference (such as a struct), we will
@@ -805,30 +750,24 @@ fn resolveInst(func: *CodeGen, ref: Air.Inst.Ref) InnerError!WValue {
//
// In the other cases, we will simply lower the constant to a value that fits
// into a single local (such as a pointer, integer, bool, etc).
- const result: WValue = if (isByRef(ty, pt, func.target.*))
- switch (try func.bin_file.lowerUav(pt, val.toIntern(), .none, func.src_loc)) {
- .mcv => |mcv| .{ .memory = mcv.load_symbol },
- .fail => |err_msg| {
- func.err_msg = err_msg;
- return error.CodegenFail;
- },
- }
+ const result: WValue = if (isByRef(ty, zcu, cg.target))
+ .{ .uav_ref = .{ .ip_index = val.toIntern() } }
else
- try func.lowerConstant(val, ty);
+ try cg.lowerConstant(val, ty);
gop.value_ptr.* = result;
return result;
}
/// NOTE: if result == .stack, it will be stored in .local
-fn finishAir(func: *CodeGen, inst: Air.Inst.Index, result: WValue, operands: []const Air.Inst.Ref) InnerError!void {
+fn finishAir(cg: *CodeGen, inst: Air.Inst.Index, result: WValue, operands: []const Air.Inst.Ref) InnerError!void {
assert(operands.len <= Liveness.bpi - 1);
- var tomb_bits = func.liveness.getTombBits(inst);
+ var tomb_bits = cg.liveness.getTombBits(inst);
for (operands) |operand| {
const dies = @as(u1, @truncate(tomb_bits)) != 0;
tomb_bits >>= 1;
if (!dies) continue;
- processDeath(func, operand);
+ processDeath(cg, operand);
}
// results of `none` can never be referenced.
@@ -836,13 +775,13 @@ fn finishAir(func: *CodeGen, inst: Air.Inst.Index, result: WValue, operands: []c
const trackable_result = if (result != .stack)
result
else
- try result.toLocal(func, func.typeOfIndex(inst));
- const branch = func.currentBranch();
+ try result.toLocal(cg, cg.typeOfIndex(inst));
+ const branch = cg.currentBranch();
branch.values.putAssumeCapacityNoClobber(inst.toRef(), trackable_result);
}
if (std.debug.runtime_safety) {
- func.air_bookkeeping += 1;
+ cg.air_bookkeeping += 1;
}
}
@@ -855,8 +794,8 @@ const Branch = struct {
}
};
-inline fn currentBranch(func: *CodeGen) *Branch {
- return &func.branches.items[func.branches.items.len - 1];
+inline fn currentBranch(cg: *CodeGen) *Branch {
+ return &cg.branches.items[cg.branches.items.len - 1];
}
const BigTomb = struct {
@@ -883,131 +822,143 @@ const BigTomb = struct {
}
};
-fn iterateBigTomb(func: *CodeGen, inst: Air.Inst.Index, operand_count: usize) !BigTomb {
- try func.currentBranch().values.ensureUnusedCapacity(func.gpa, operand_count + 1);
+fn iterateBigTomb(cg: *CodeGen, inst: Air.Inst.Index, operand_count: usize) !BigTomb {
+ try cg.currentBranch().values.ensureUnusedCapacity(cg.gpa, operand_count + 1);
return BigTomb{
- .gen = func,
+ .gen = cg,
.inst = inst,
- .lbt = func.liveness.iterateBigTomb(inst),
+ .lbt = cg.liveness.iterateBigTomb(inst),
};
}
-fn processDeath(func: *CodeGen, ref: Air.Inst.Ref) void {
+fn processDeath(cg: *CodeGen, ref: Air.Inst.Ref) void {
if (ref.toIndex() == null) return;
// Branches are currently only allowed to free locals allocated
// within their own branch.
// TODO: Upon branch consolidation free any locals if needed.
- const value = func.currentBranch().values.getPtr(ref) orelse return;
+ const value = cg.currentBranch().values.getPtr(ref) orelse return;
if (value.* != .local) return;
- const reserved_indexes = func.args.len + @intFromBool(func.return_value != .none);
+ const reserved_indexes = cg.args.len + @intFromBool(cg.return_value != .none);
if (value.local.value < reserved_indexes) {
return; // function arguments can never be re-used
}
log.debug("Decreasing reference for ref: %{d}, using local '{d}'", .{ @intFromEnum(ref.toIndex().?), value.local.value });
value.local.references -= 1; // if this panics, a call to `reuseOperand` was forgotten by the developer
if (value.local.references == 0) {
- value.free(func);
+ value.free(cg);
}
}
-/// Appends a MIR instruction and returns its index within the list of instructions
-fn addInst(func: *CodeGen, inst: Mir.Inst) error{OutOfMemory}!void {
- try func.mir_instructions.append(func.gpa, inst);
+fn addInst(cg: *CodeGen, inst: Mir.Inst) error{OutOfMemory}!void {
+ try cg.mir_instructions.append(cg.gpa, inst);
+}
+
+fn addTag(cg: *CodeGen, tag: Mir.Inst.Tag) error{OutOfMemory}!void {
+ try cg.addInst(.{ .tag = tag, .data = .{ .tag = {} } });
+}
+
+fn addExtended(cg: *CodeGen, opcode: std.wasm.MiscOpcode) error{OutOfMemory}!void {
+ const extra_index = cg.extraLen();
+ try cg.mir_extra.append(cg.gpa, @intFromEnum(opcode));
+ try cg.addInst(.{ .tag = .misc_prefix, .data = .{ .payload = extra_index } });
}
-fn addTag(func: *CodeGen, tag: Mir.Inst.Tag) error{OutOfMemory}!void {
- try func.addInst(.{ .tag = tag, .data = .{ .tag = {} } });
+fn addLabel(cg: *CodeGen, tag: Mir.Inst.Tag, label: u32) error{OutOfMemory}!void {
+ try cg.addInst(.{ .tag = tag, .data = .{ .label = label } });
}
-fn addExtended(func: *CodeGen, opcode: wasm.MiscOpcode) error{OutOfMemory}!void {
- const extra_index = @as(u32, @intCast(func.mir_extra.items.len));
- try func.mir_extra.append(func.gpa, @intFromEnum(opcode));
- try func.addInst(.{ .tag = .misc_prefix, .data = .{ .payload = extra_index } });
+fn addLocal(cg: *CodeGen, tag: Mir.Inst.Tag, local: u32) error{OutOfMemory}!void {
+ try cg.addInst(.{ .tag = tag, .data = .{ .local = local } });
}
-fn addLabel(func: *CodeGen, tag: Mir.Inst.Tag, label: u32) error{OutOfMemory}!void {
- try func.addInst(.{ .tag = tag, .data = .{ .label = label } });
+fn addFuncTy(cg: *CodeGen, tag: Mir.Inst.Tag, i: Wasm.FunctionType.Index) error{OutOfMemory}!void {
+ try cg.addInst(.{ .tag = tag, .data = .{ .func_ty = i } });
}
/// Accepts an unsigned 32bit integer rather than a signed integer to
/// prevent us from having to bitcast multiple times as most values
/// within codegen are represented as unsigned rather than signed.
-fn addImm32(func: *CodeGen, imm: u32) error{OutOfMemory}!void {
- try func.addInst(.{ .tag = .i32_const, .data = .{ .imm32 = @bitCast(imm) } });
+fn addImm32(cg: *CodeGen, imm: u32) error{OutOfMemory}!void {
+ try cg.addInst(.{ .tag = .i32_const, .data = .{ .imm32 = @bitCast(imm) } });
}
/// Accepts an unsigned 64bit integer rather than a signed integer to
/// prevent us from having to bitcast multiple times as most values
/// within codegen are represented as unsigned rather than signed.
-fn addImm64(func: *CodeGen, imm: u64) error{OutOfMemory}!void {
- const extra_index = try func.addExtra(Mir.Imm64.fromU64(imm));
- try func.addInst(.{ .tag = .i64_const, .data = .{ .payload = extra_index } });
+fn addImm64(cg: *CodeGen, imm: u64) error{OutOfMemory}!void {
+ const extra_index = try cg.addExtra(Mir.Imm64.init(imm));
+ try cg.addInst(.{ .tag = .i64_const, .data = .{ .payload = extra_index } });
}
/// Accepts the index into the list of 128bit-immediates
-fn addImm128(func: *CodeGen, index: u32) error{OutOfMemory}!void {
- const simd_values = func.simd_immediates.items[index];
- const extra_index = @as(u32, @intCast(func.mir_extra.items.len));
+fn addImm128(cg: *CodeGen, index: u32) error{OutOfMemory}!void {
+ const simd_values = cg.simd_immediates.items[index];
+ const extra_index = cg.extraLen();
// tag + 128bit value
- try func.mir_extra.ensureUnusedCapacity(func.gpa, 5);
- func.mir_extra.appendAssumeCapacity(std.wasm.simdOpcode(.v128_const));
- func.mir_extra.appendSliceAssumeCapacity(@alignCast(mem.bytesAsSlice(u32, &simd_values)));
- try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
+ try cg.mir_extra.ensureUnusedCapacity(cg.gpa, 5);
+ cg.mir_extra.appendAssumeCapacity(@intFromEnum(std.wasm.SimdOpcode.v128_const));
+ cg.mir_extra.appendSliceAssumeCapacity(@alignCast(mem.bytesAsSlice(u32, &simd_values)));
+ try cg.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
}
-fn addFloat64(func: *CodeGen, float: f64) error{OutOfMemory}!void {
- const extra_index = try func.addExtra(Mir.Float64.fromFloat64(float));
- try func.addInst(.{ .tag = .f64_const, .data = .{ .payload = extra_index } });
+fn addFloat64(cg: *CodeGen, float: f64) error{OutOfMemory}!void {
+ const extra_index = try cg.addExtra(Mir.Float64.init(float));
+ try cg.addInst(.{ .tag = .f64_const, .data = .{ .payload = extra_index } });
}
/// Inserts an instruction to load/store from/to wasm's linear memory dependent on the given `tag`.
-fn addMemArg(func: *CodeGen, tag: Mir.Inst.Tag, mem_arg: Mir.MemArg) error{OutOfMemory}!void {
- const extra_index = try func.addExtra(mem_arg);
- try func.addInst(.{ .tag = tag, .data = .{ .payload = extra_index } });
+fn addMemArg(cg: *CodeGen, tag: Mir.Inst.Tag, mem_arg: Mir.MemArg) error{OutOfMemory}!void {
+ const extra_index = try cg.addExtra(mem_arg);
+ try cg.addInst(.{ .tag = tag, .data = .{ .payload = extra_index } });
}
/// Inserts an instruction from the 'atomics' feature which accesses wasm's linear memory dependent on the
/// given `tag`.
-fn addAtomicMemArg(func: *CodeGen, tag: wasm.AtomicsOpcode, mem_arg: Mir.MemArg) error{OutOfMemory}!void {
- const extra_index = try func.addExtra(@as(struct { val: u32 }, .{ .val = wasm.atomicsOpcode(tag) }));
- _ = try func.addExtra(mem_arg);
- try func.addInst(.{ .tag = .atomics_prefix, .data = .{ .payload = extra_index } });
+fn addAtomicMemArg(cg: *CodeGen, tag: std.wasm.AtomicsOpcode, mem_arg: Mir.MemArg) error{OutOfMemory}!void {
+ const extra_index = try cg.addExtra(@as(struct { val: u32 }, .{ .val = @intFromEnum(tag) }));
+ _ = try cg.addExtra(mem_arg);
+ try cg.addInst(.{ .tag = .atomics_prefix, .data = .{ .payload = extra_index } });
}
/// Helper function to emit atomic mir opcodes.
-fn addAtomicTag(func: *CodeGen, tag: wasm.AtomicsOpcode) error{OutOfMemory}!void {
- const extra_index = try func.addExtra(@as(struct { val: u32 }, .{ .val = wasm.atomicsOpcode(tag) }));
- try func.addInst(.{ .tag = .atomics_prefix, .data = .{ .payload = extra_index } });
+fn addAtomicTag(cg: *CodeGen, tag: std.wasm.AtomicsOpcode) error{OutOfMemory}!void {
+ const extra_index = try cg.addExtra(@as(struct { val: u32 }, .{ .val = @intFromEnum(tag) }));
+ try cg.addInst(.{ .tag = .atomics_prefix, .data = .{ .payload = extra_index } });
}
/// Appends entries to `mir_extra` based on the type of `extra`.
/// Returns the index into `mir_extra`
-fn addExtra(func: *CodeGen, extra: anytype) error{OutOfMemory}!u32 {
+fn addExtra(cg: *CodeGen, extra: anytype) error{OutOfMemory}!u32 {
const fields = std.meta.fields(@TypeOf(extra));
- try func.mir_extra.ensureUnusedCapacity(func.gpa, fields.len);
- return func.addExtraAssumeCapacity(extra);
+ try cg.mir_extra.ensureUnusedCapacity(cg.gpa, fields.len);
+ return cg.addExtraAssumeCapacity(extra);
}
/// Appends entries to `mir_extra` based on the type of `extra`.
/// Returns the index into `mir_extra`
-fn addExtraAssumeCapacity(func: *CodeGen, extra: anytype) error{OutOfMemory}!u32 {
+fn addExtraAssumeCapacity(cg: *CodeGen, extra: anytype) error{OutOfMemory}!u32 {
const fields = std.meta.fields(@TypeOf(extra));
- const result = @as(u32, @intCast(func.mir_extra.items.len));
+ const result = cg.extraLen();
inline for (fields) |field| {
- func.mir_extra.appendAssumeCapacity(switch (field.type) {
+ cg.mir_extra.appendAssumeCapacity(switch (field.type) {
u32 => @field(extra, field.name),
+ i32 => @bitCast(@field(extra, field.name)),
+ InternPool.Index,
+ InternPool.Nav.Index,
+ Wasm.UavsObjIndex,
+ Wasm.UavsExeIndex,
+ => @intFromEnum(@field(extra, field.name)),
else => |field_type| @compileError("Unsupported field type " ++ @typeName(field_type)),
});
}
return result;
}
-/// Using a given `Type`, returns the corresponding valtype for .auto callconv
-fn typeToValtype(ty: Type, pt: Zcu.PerThread, target: std.Target) wasm.Valtype {
- const zcu = pt.zcu;
+/// For `std.builtin.CallingConvention.auto`.
+pub fn typeToValtype(ty: Type, zcu: *const Zcu, target: *const std.Target) std.wasm.Valtype {
const ip = &zcu.intern_pool;
return switch (ty.zigTypeTag(zcu)) {
- .float => switch (ty.floatBits(target)) {
+ .float => switch (ty.floatBits(target.*)) {
16 => .i32, // stored/loaded as u16
32 => .f32,
64 => .f64,
@@ -1022,19 +973,20 @@ fn typeToValtype(ty: Type, pt: Zcu.PerThread, target: std.Target) wasm.Valtype {
.@"struct" => blk: {
if (zcu.typeToPackedStruct(ty)) |packed_struct| {
const backing_int_ty = Type.fromInterned(packed_struct.backingIntTypeUnordered(ip));
- break :blk typeToValtype(backing_int_ty, pt, target);
+ break :blk typeToValtype(backing_int_ty, zcu, target);
} else {
break :blk .i32;
}
},
- .vector => switch (determineSimdStoreStrategy(ty, zcu, target)) {
+ .vector => switch (CodeGen.determineSimdStoreStrategy(ty, zcu, target)) {
.direct => .v128,
.unrolled => .i32,
},
.@"union" => switch (ty.containerLayout(zcu)) {
- .@"packed" => blk: {
- const int_ty = pt.intType(.unsigned, @as(u16, @intCast(ty.bitSize(zcu)))) catch @panic("out of memory");
- break :blk typeToValtype(int_ty, pt, target);
+ .@"packed" => switch (ty.bitSize(zcu)) {
+ 0...32 => .i32,
+ 33...64 => .i64,
+ else => .i32,
},
else => .i32,
},
@@ -1042,42 +994,94 @@ fn typeToValtype(ty: Type, pt: Zcu.PerThread, target: std.Target) wasm.Valtype {
};
}
-/// Using a given `Type`, returns the byte representation of its wasm value type
-fn genValtype(ty: Type, pt: Zcu.PerThread, target: std.Target) u8 {
- return wasm.valtype(typeToValtype(ty, pt, target));
-}
-
/// Using a given `Type`, returns the corresponding wasm value type
-/// Differently from `genValtype` this also allows `void` to create a block
+/// Differently from `typeToValtype` this also allows `void` to create a block
/// with no return type
-fn genBlockType(ty: Type, pt: Zcu.PerThread, target: std.Target) u8 {
+fn genBlockType(ty: Type, zcu: *const Zcu, target: *const std.Target) std.wasm.BlockType {
return switch (ty.ip_index) {
- .void_type, .noreturn_type => wasm.block_empty,
- else => genValtype(ty, pt, target),
+ .void_type, .noreturn_type => .empty,
+ else => .fromValtype(typeToValtype(ty, zcu, target)),
};
}
/// Writes the bytecode depending on the given `WValue` in `val`
-fn emitWValue(func: *CodeGen, value: WValue) InnerError!void {
+fn emitWValue(cg: *CodeGen, value: WValue) InnerError!void {
switch (value) {
.dead => unreachable, // reference to free'd `WValue` (missing reuseOperand?)
.none, .stack => {}, // no-op
- .local => |idx| try func.addLabel(.local_get, idx.value),
- .imm32 => |val| try func.addImm32(val),
- .imm64 => |val| try func.addImm64(val),
- .imm128 => |val| try func.addImm128(val),
- .float32 => |val| try func.addInst(.{ .tag = .f32_const, .data = .{ .float32 = val } }),
- .float64 => |val| try func.addFloat64(val),
- .memory => |ptr| {
- const extra_index = try func.addExtra(Mir.Memory{ .pointer = ptr, .offset = 0 });
- try func.addInst(.{ .tag = .memory_address, .data = .{ .payload = extra_index } });
+ .local => |idx| try cg.addLocal(.local_get, idx.value),
+ .imm32 => |val| try cg.addImm32(val),
+ .imm64 => |val| try cg.addImm64(val),
+ .imm128 => |val| try cg.addImm128(val),
+ .float32 => |val| try cg.addInst(.{ .tag = .f32_const, .data = .{ .float32 = val } }),
+ .float64 => |val| try cg.addFloat64(val),
+ .nav_ref => |nav_ref| {
+ const wasm = cg.wasm;
+ const comp = wasm.base.comp;
+ const zcu = comp.zcu.?;
+ const ip = &zcu.intern_pool;
+ if (ip.getNav(nav_ref.nav_index).isFn(ip)) {
+ assert(nav_ref.offset == 0);
+ const gop = try wasm.zcu_indirect_function_set.getOrPut(comp.gpa, nav_ref.nav_index);
+ if (!gop.found_existing) gop.value_ptr.* = {};
+ try cg.addInst(.{
+ .tag = .func_ref,
+ .data = .{ .indirect_function_table_index = @enumFromInt(gop.index) },
+ });
+ } else if (nav_ref.offset == 0) {
+ try cg.addInst(.{ .tag = .nav_ref, .data = .{ .nav_index = nav_ref.nav_index } });
+ } else {
+ try cg.addInst(.{
+ .tag = .nav_ref_off,
+ .data = .{
+ .payload = try cg.addExtra(Mir.NavRefOff{
+ .nav_index = nav_ref.nav_index,
+ .offset = nav_ref.offset,
+ }),
+ },
+ });
+ }
},
- .memory_offset => |mem_off| {
- const extra_index = try func.addExtra(Mir.Memory{ .pointer = mem_off.pointer, .offset = mem_off.offset });
- try func.addInst(.{ .tag = .memory_address, .data = .{ .payload = extra_index } });
+ .uav_ref => |uav| {
+ const wasm = cg.wasm;
+ const comp = wasm.base.comp;
+ const is_obj = comp.config.output_mode == .Obj;
+ const zcu = comp.zcu.?;
+ const ip = &zcu.intern_pool;
+ if (ip.isFunctionType(ip.typeOf(uav.ip_index))) {
+ assert(uav.offset == 0);
+ const owner_nav = ip.toFunc(uav.ip_index).owner_nav;
+ const gop = try wasm.zcu_indirect_function_set.getOrPut(comp.gpa, owner_nav);
+ if (!gop.found_existing) gop.value_ptr.* = {};
+ try cg.addInst(.{
+ .tag = .func_ref,
+ .data = .{ .indirect_function_table_index = @enumFromInt(gop.index) },
+ });
+ } else if (uav.offset == 0) {
+ try cg.addInst(.{
+ .tag = .uav_ref,
+ .data = if (is_obj) .{
+ .uav_obj = try wasm.refUavObj(uav.ip_index, uav.orig_ptr_ty),
+ } else .{
+ .uav_exe = try wasm.refUavExe(uav.ip_index, uav.orig_ptr_ty),
+ },
+ });
+ } else {
+ try cg.addInst(.{
+ .tag = .uav_ref_off,
+ .data = .{
+ .payload = if (is_obj) try cg.addExtra(Mir.UavRefOffObj{
+ .uav_obj = try wasm.refUavObj(uav.ip_index, uav.orig_ptr_ty),
+ .offset = uav.offset,
+ }) else try cg.addExtra(Mir.UavRefOffExe{
+ .uav_exe = try wasm.refUavExe(uav.ip_index, uav.orig_ptr_ty),
+ .offset = uav.offset,
+ }),
+ },
+ });
+ }
},
- .function_index => |index| try func.addLabel(.function_index, index), // write function index and generate relocation
- .stack_offset => try func.addLabel(.local_get, func.bottom_stack_value.local.value), // caller must ensure to address the offset
+ .stack_offset => try cg.addLocal(.local_get, cg.bottom_stack_value.local.value), // caller must ensure to address the offset
}
}
@@ -1085,7 +1089,7 @@ fn emitWValue(func: *CodeGen, value: WValue) InnerError!void {
/// The old `WValue` found at instruction `ref` is then replaced by the
/// modified `WValue` and returned. When given a non-local or non-stack-offset,
 /// returns the given `operand` itself instead.
-fn reuseOperand(func: *CodeGen, ref: Air.Inst.Ref, operand: WValue) WValue {
+fn reuseOperand(cg: *CodeGen, ref: Air.Inst.Ref, operand: WValue) WValue {
if (operand != .local and operand != .stack_offset) return operand;
var new_value = operand;
switch (new_value) {
@@ -1093,17 +1097,17 @@ fn reuseOperand(func: *CodeGen, ref: Air.Inst.Ref, operand: WValue) WValue {
.stack_offset => |*stack_offset| stack_offset.references += 1,
else => unreachable,
}
- const old_value = func.getResolvedInst(ref);
+ const old_value = cg.getResolvedInst(ref);
old_value.* = new_value;
return new_value;
}
/// From a reference, returns its resolved `WValue`.
 /// It's illegal to provide an `Air.Inst.Ref` that hasn't been resolved yet.
-fn getResolvedInst(func: *CodeGen, ref: Air.Inst.Ref) *WValue {
- var index = func.branches.items.len;
+fn getResolvedInst(cg: *CodeGen, ref: Air.Inst.Ref) *WValue {
+ var index = cg.branches.items.len;
while (index > 0) : (index -= 1) {
- const branch = func.branches.items[index - 1];
+ const branch = cg.branches.items[index - 1];
if (branch.values.getPtr(ref)) |value| {
return value;
}
@@ -1113,243 +1117,238 @@ fn getResolvedInst(func: *CodeGen, ref: Air.Inst.Ref) *WValue {
 /// Creates a local for a given `Type`.
 /// Returns a corresponding `WValue` with `local` as the active tag
-fn allocLocal(func: *CodeGen, ty: Type) InnerError!WValue {
- const pt = func.pt;
- const valtype = typeToValtype(ty, pt, func.target.*);
+fn allocLocal(cg: *CodeGen, ty: Type) InnerError!WValue {
+ const zcu = cg.pt.zcu;
+ const valtype = typeToValtype(ty, zcu, cg.target);
const index_or_null = switch (valtype) {
- .i32 => func.free_locals_i32.popOrNull(),
- .i64 => func.free_locals_i64.popOrNull(),
- .f32 => func.free_locals_f32.popOrNull(),
- .f64 => func.free_locals_f64.popOrNull(),
- .v128 => func.free_locals_v128.popOrNull(),
+ .i32 => cg.free_locals_i32.popOrNull(),
+ .i64 => cg.free_locals_i64.popOrNull(),
+ .f32 => cg.free_locals_f32.popOrNull(),
+ .f64 => cg.free_locals_f64.popOrNull(),
+ .v128 => cg.free_locals_v128.popOrNull(),
};
if (index_or_null) |index| {
log.debug("reusing local ({d}) of type {}", .{ index, valtype });
return .{ .local = .{ .value = index, .references = 1 } };
}
log.debug("new local of type {}", .{valtype});
- return func.ensureAllocLocal(ty);
+ return cg.ensureAllocLocal(ty);
}
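// A minimal standalone sketch of the reuse strategy above: each wasm value type
// keeps a list of locals freed earlier in the function, and a brand new local
// index is only minted when that list is empty. `FreeList` and `takeLocal` are
// illustrative names, not part of this file.
const std = @import("std");

const FreeList = struct {
    buf: [8]u32 = undefined,
    len: usize = 0,

    fn push(fl: *FreeList, index: u32) void {
        fl.buf[fl.len] = index;
        fl.len += 1;
    }

    fn pop(fl: *FreeList) ?u32 {
        if (fl.len == 0) return null;
        fl.len -= 1;
        return fl.buf[fl.len];
    }
};

fn takeLocal(free_list: *FreeList, next_index: *u32) u32 {
    if (free_list.pop()) |reused| return reused;
    const index = next_index.*;
    next_index.* += 1;
    return index;
}

test "freed locals are reused before new ones are created" {
    var free_i32: FreeList = .{};
    var next_index: u32 = 0;
    try std.testing.expectEqual(@as(u32, 0), takeLocal(&free_i32, &next_index));
    try std.testing.expectEqual(@as(u32, 1), takeLocal(&free_i32, &next_index));
    free_i32.push(0); // local 0 becomes free again
    try std.testing.expectEqual(@as(u32, 0), takeLocal(&free_i32, &next_index));
}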
 /// Ensures a new local will be created. This is useful when a
 /// zero-initialized local is required.
-fn ensureAllocLocal(func: *CodeGen, ty: Type) InnerError!WValue {
- const pt = func.pt;
- try func.locals.append(func.gpa, genValtype(ty, pt, func.target.*));
- const initial_index = func.local_index;
- func.local_index += 1;
+fn ensureAllocLocal(cg: *CodeGen, ty: Type) InnerError!WValue {
+ const zcu = cg.pt.zcu;
+ try cg.locals.append(cg.gpa, typeToValtype(ty, zcu, cg.target));
+ const initial_index = cg.local_index;
+ cg.local_index += 1;
return .{ .local = .{ .value = initial_index, .references = 1 } };
}
-/// Generates a `wasm.Type` from a given function type.
-/// Memory is owned by the caller.
-fn genFunctype(
- gpa: Allocator,
- cc: std.builtin.CallingConvention,
- params: []const InternPool.Index,
- return_type: Type,
- pt: Zcu.PerThread,
- target: std.Target,
-) !wasm.Type {
- const zcu = pt.zcu;
- var temp_params = std.ArrayList(wasm.Valtype).init(gpa);
- defer temp_params.deinit();
- var returns = std.ArrayList(wasm.Valtype).init(gpa);
- defer returns.deinit();
-
- if (firstParamSRet(cc, return_type, pt, target)) {
- try temp_params.append(.i32); // memory address is always a 32-bit handle
- } else if (return_type.hasRuntimeBitsIgnoreComptime(zcu)) {
- if (cc == .wasm_watc) {
- const res_classes = abi.classifyType(return_type, zcu);
- assert(res_classes[0] == .direct and res_classes[1] == .none);
- const scalar_type = abi.scalarType(return_type, zcu);
- try returns.append(typeToValtype(scalar_type, pt, target));
- } else {
- try returns.append(typeToValtype(return_type, pt, target));
+pub const Function = extern struct {
+ /// Index into `Wasm.mir_instructions`.
+ mir_off: u32,
+    /// This is unused except as a safety slice bound and could be removed.
+ mir_len: u32,
+ /// Index into `Wasm.mir_extra`.
+ mir_extra_off: u32,
+    /// This is unused except as a safety slice bound and could be removed.
+ mir_extra_len: u32,
+ locals_off: u32,
+ locals_len: u32,
+ prologue: Prologue,
+
+ pub const Prologue = extern struct {
+ flags: Flags,
+ sp_local: u32,
+ stack_size: u32,
+ bottom_stack_local: u32,
+
+ pub const Flags = packed struct(u32) {
+ stack_alignment: Alignment,
+ padding: u26 = 0,
+ };
+
+ pub const none: Prologue = .{
+ .sp_local = 0,
+ .flags = .{ .stack_alignment = .none },
+ .stack_size = 0,
+ .bottom_stack_local = 0,
+ };
+
+ pub fn isNone(p: *const Prologue) bool {
+            return p.flags.stack_alignment == .none;
}
- } else if (return_type.isError(zcu)) {
- try returns.append(.i32);
- }
-
- // param types
- for (params) |param_type_ip| {
- const param_type = Type.fromInterned(param_type_ip);
- if (!param_type.hasRuntimeBitsIgnoreComptime(zcu)) continue;
-
- switch (cc) {
- .wasm_watc => {
- const param_classes = abi.classifyType(param_type, zcu);
- if (param_classes[1] == .none) {
- if (param_classes[0] == .direct) {
- const scalar_type = abi.scalarType(param_type, zcu);
- try temp_params.append(typeToValtype(scalar_type, pt, target));
- } else {
- try temp_params.append(typeToValtype(param_type, pt, target));
- }
- } else {
- // i128/f128
- try temp_params.append(.i64);
- try temp_params.append(.i64);
- }
- },
- else => try temp_params.append(typeToValtype(param_type, pt, target)),
+ };
+
+ pub fn lower(f: *Function, wasm: *Wasm, code: *std.ArrayListUnmanaged(u8)) Allocator.Error!void {
+ const gpa = wasm.base.comp.gpa;
+
+ // Write the locals in the prologue of the function body.
+ const locals = wasm.all_zcu_locals.items[f.locals_off..][0..f.locals_len];
+ try code.ensureUnusedCapacity(gpa, 5 + locals.len * 6 + 38);
+
+ std.leb.writeUleb128(code.fixedWriter(), @as(u32, @intCast(locals.len))) catch unreachable;
+ for (locals) |local| {
+ std.leb.writeUleb128(code.fixedWriter(), @as(u32, 1)) catch unreachable;
+ code.appendAssumeCapacity(@intFromEnum(local));
+ }
+
+ // Stack management section of function prologue.
+ const stack_alignment = f.prologue.flags.stack_alignment;
+ if (stack_alignment.toByteUnits()) |align_bytes| {
+ const sp_global: Wasm.GlobalIndex = .stack_pointer;
+ // load stack pointer
+ code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.global_get));
+            std.leb.writeUleb128(code.fixedWriter(), @intFromEnum(sp_global)) catch unreachable;
+ // store stack pointer so we can restore it when we return from the function
+ code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.local_tee));
+ leb.writeUleb128(code.fixedWriter(), f.prologue.sp_local) catch unreachable;
+ // get the total stack size
+ const aligned_stack: i32 = @intCast(stack_alignment.forward(f.prologue.stack_size));
+ code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_const));
+ leb.writeIleb128(code.fixedWriter(), aligned_stack) catch unreachable;
+ // subtract it from the current stack pointer
+ code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_sub));
+ // Get negative stack alignment
+ const neg_stack_align = @as(i32, @intCast(align_bytes)) * -1;
+ code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_const));
+ leb.writeIleb128(code.fixedWriter(), neg_stack_align) catch unreachable;
+ // Bitwise-and the value to get the new stack pointer to ensure the
+ // pointers are aligned with the abi alignment.
+ code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_and));
+ // The bottom will be used to calculate all stack pointer offsets.
+ code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.local_tee));
+ leb.writeUleb128(code.fixedWriter(), f.prologue.bottom_stack_local) catch unreachable;
+ // Store the current stack pointer value into the global stack pointer so other function calls will
+ // start from this value instead and not overwrite the current stack.
+ code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.global_set));
+            std.leb.writeUleb128(code.fixedWriter(), @intFromEnum(sp_global)) catch unreachable;
}
+
+ var emit: Emit = .{
+ .mir = .{
+ .instruction_tags = wasm.mir_instructions.items(.tag)[f.mir_off..][0..f.mir_len],
+ .instruction_datas = wasm.mir_instructions.items(.data)[f.mir_off..][0..f.mir_len],
+ .extra = wasm.mir_extra.items[f.mir_extra_off..][0..f.mir_extra_len],
+ },
+ .wasm = wasm,
+ .code = code,
+ };
+ try emit.lowerToCode();
}
+};
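// A minimal standalone sketch of the arithmetic emitted by the prologue above:
// the frame base is the incoming stack pointer minus the frame size, rounded
// down to the frame alignment by AND-ing with the negated alignment (the
// i32.sub / i32.const -align / i32.and sequence). `framePointer` is an
// illustrative name only.
const std = @import("std");

fn framePointer(old_sp: i32, stack_size: i32, alignment: i32) i32 {
    return (old_sp - stack_size) & -alignment;
}

test "frame base is aligned downwards" {
    // 1000 - 20 = 980, rounded down to a multiple of 16 -> 976
    try std.testing.expectEqual(@as(i32, 976), framePointer(1000, 20, 16));
    // already-aligned results are left untouched: 1024 - 64 = 960
    try std.testing.expectEqual(@as(i32, 960), framePointer(1024, 64, 16));
}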
- return wasm.Type{
- .params = try temp_params.toOwnedSlice(),
- .returns = try returns.toOwnedSlice(),
- };
-}
+pub const Error = error{
+ OutOfMemory,
+ /// Compiler was asked to operate on a number larger than supported.
+ Overflow,
+ /// Indicates the error is already stored in Zcu `failed_codegen`.
+ CodegenFail,
+};
-pub fn generate(
- bin_file: *link.File,
+pub fn function(
+ wasm: *Wasm,
pt: Zcu.PerThread,
- src_loc: Zcu.LazySrcLoc,
func_index: InternPool.Index,
air: Air,
liveness: Liveness,
- code: *std.ArrayList(u8),
- debug_output: link.File.DebugInfoOutput,
-) codegen.CodeGenError!codegen.Result {
+) Error!Function {
const zcu = pt.zcu;
const gpa = zcu.gpa;
- const func = zcu.funcInfo(func_index);
- const file_scope = zcu.navFileScope(func.owner_nav);
+ const cg = zcu.funcInfo(func_index);
+ const file_scope = zcu.navFileScope(cg.owner_nav);
const target = &file_scope.mod.resolved_target.result;
+ const fn_ty = zcu.navValue(cg.owner_nav).typeOf(zcu);
+ const fn_info = zcu.typeToFunc(fn_ty).?;
+ const ip = &zcu.intern_pool;
+ const fn_ty_index = try wasm.internFunctionType(fn_info.cc, fn_info.param_types.get(ip), .fromInterned(fn_info.return_type), target);
+ const returns = fn_ty_index.ptr(wasm).returns.slice(wasm);
+ const any_returns = returns.len != 0;
+
+ var cc_result = try resolveCallingConventionValues(zcu, fn_ty, target);
+ defer cc_result.deinit(gpa);
+
var code_gen: CodeGen = .{
.gpa = gpa,
.pt = pt,
.air = air,
.liveness = liveness,
- .code = code,
- .owner_nav = func.owner_nav,
- .src_loc = src_loc,
- .err_msg = undefined,
- .locals = .{},
+ .owner_nav = cg.owner_nav,
.target = target,
- .bin_file = bin_file.cast(.wasm).?,
- .debug_output = debug_output,
+ .ptr_size = switch (target.cpu.arch) {
+ .wasm32 => .wasm32,
+ .wasm64 => .wasm64,
+ else => unreachable,
+ },
+ .wasm = wasm,
.func_index = func_index,
+ .args = cc_result.args,
+ .return_value = cc_result.return_value,
+ .local_index = cc_result.local_index,
+ .mir_instructions = &wasm.mir_instructions,
+ .mir_extra = &wasm.mir_extra,
+ .locals = &wasm.all_zcu_locals,
+ .start_mir_extra_off = @intCast(wasm.mir_extra.items.len),
+ .start_locals_off = @intCast(wasm.all_zcu_locals.items.len),
};
defer code_gen.deinit();
- genFunc(&code_gen) catch |err| switch (err) {
- error.CodegenFail => return codegen.Result{ .fail = code_gen.err_msg },
- else => |e| return e,
+ return functionInner(&code_gen, any_returns) catch |err| switch (err) {
+ error.CodegenFail => return error.CodegenFail,
+ else => |e| return code_gen.fail("failed to generate function: {s}", .{@errorName(e)}),
};
-
- return codegen.Result.ok;
}
-fn genFunc(func: *CodeGen) InnerError!void {
- const pt = func.pt;
- const zcu = pt.zcu;
- const ip = &zcu.intern_pool;
- const fn_ty = zcu.navValue(func.owner_nav).typeOf(zcu);
- const fn_info = zcu.typeToFunc(fn_ty).?;
- var func_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types.get(ip), Type.fromInterned(fn_info.return_type), pt, func.target.*);
- defer func_type.deinit(func.gpa);
- _ = try func.bin_file.storeNavType(func.owner_nav, func_type);
-
- var cc_result = try func.resolveCallingConventionValues(fn_ty);
- defer cc_result.deinit(func.gpa);
+fn functionInner(cg: *CodeGen, any_returns: bool) InnerError!Function {
+ const wasm = cg.wasm;
+ const zcu = cg.pt.zcu;
- func.args = cc_result.args;
- func.return_value = cc_result.return_value;
+ const start_mir_off: u32 = @intCast(wasm.mir_instructions.len);
- try func.addTag(.dbg_prologue_end);
-
- try func.branches.append(func.gpa, .{});
+ try cg.branches.append(cg.gpa, .{});
// clean up outer branch
defer {
- var outer_branch = func.branches.pop();
- outer_branch.deinit(func.gpa);
- assert(func.branches.items.len == 0); // missing branch merge
+ var outer_branch = cg.branches.pop();
+ outer_branch.deinit(cg.gpa);
+ assert(cg.branches.items.len == 0); // missing branch merge
}
// Generate MIR for function body
- try func.genBody(func.air.getMainBody());
+ try cg.genBody(cg.air.getMainBody());
// In case we have a return value, but the last instruction is a noreturn (such as a while loop)
// we emit an unreachable instruction to tell the stack validator that part will never be reached.
- if (func_type.returns.len != 0 and func.air.instructions.len > 0) {
- const inst: Air.Inst.Index = @enumFromInt(func.air.instructions.len - 1);
- const last_inst_ty = func.typeOfIndex(inst);
+ if (any_returns and cg.air.instructions.len > 0) {
+ const inst: Air.Inst.Index = @enumFromInt(cg.air.instructions.len - 1);
+ const last_inst_ty = cg.typeOfIndex(inst);
if (!last_inst_ty.hasRuntimeBitsIgnoreComptime(zcu) or last_inst_ty.isNoReturn(zcu)) {
- try func.addTag(.@"unreachable");
+ try cg.addTag(.@"unreachable");
}
}
// End of function body
- try func.addTag(.end);
-
- try func.addTag(.dbg_epilogue_begin);
-
- // check if we have to initialize and allocate anything into the stack frame.
- // If so, create enough stack space and insert the instructions at the front of the list.
- if (func.initial_stack_value != .none) {
- var prologue = std.ArrayList(Mir.Inst).init(func.gpa);
- defer prologue.deinit();
-
- const sp = @intFromEnum(func.bin_file.zig_object.?.stack_pointer_sym);
- // load stack pointer
- try prologue.append(.{ .tag = .global_get, .data = .{ .label = sp } });
- // store stack pointer so we can restore it when we return from the function
- try prologue.append(.{ .tag = .local_tee, .data = .{ .label = func.initial_stack_value.local.value } });
- // get the total stack size
- const aligned_stack = func.stack_alignment.forward(func.stack_size);
- try prologue.append(.{ .tag = .i32_const, .data = .{ .imm32 = @intCast(aligned_stack) } });
- // subtract it from the current stack pointer
- try prologue.append(.{ .tag = .i32_sub, .data = .{ .tag = {} } });
- // Get negative stack alignment
- try prologue.append(.{ .tag = .i32_const, .data = .{ .imm32 = @as(i32, @intCast(func.stack_alignment.toByteUnits().?)) * -1 } });
- // Bitwise-and the value to get the new stack pointer to ensure the pointers are aligned with the abi alignment
- try prologue.append(.{ .tag = .i32_and, .data = .{ .tag = {} } });
- // store the current stack pointer as the bottom, which will be used to calculate all stack pointer offsets
- try prologue.append(.{ .tag = .local_tee, .data = .{ .label = func.bottom_stack_value.local.value } });
- // Store the current stack pointer value into the global stack pointer so other function calls will
- // start from this value instead and not overwrite the current stack.
- try prologue.append(.{ .tag = .global_set, .data = .{ .label = sp } });
-
- // reserve space and insert all prologue instructions at the front of the instruction list
- // We insert them in reserve order as there is no insertSlice in multiArrayList.
- try func.mir_instructions.ensureUnusedCapacity(func.gpa, prologue.items.len);
- for (prologue.items, 0..) |_, index| {
- const inst = prologue.items[prologue.items.len - 1 - index];
- func.mir_instructions.insertAssumeCapacity(0, inst);
- }
- }
-
- var mir: Mir = .{
- .instructions = func.mir_instructions.toOwnedSlice(),
- .extra = try func.mir_extra.toOwnedSlice(func.gpa),
- };
- defer mir.deinit(func.gpa);
-
- var emit: Emit = .{
- .mir = mir,
- .bin_file = func.bin_file,
- .code = func.code,
- .locals = func.locals.items,
- .owner_nav = func.owner_nav,
- .dbg_output = func.debug_output,
- .prev_di_line = 0,
- .prev_di_column = 0,
- .prev_di_offset = 0,
- };
-
- emit.emitMir() catch |err| switch (err) {
- error.EmitFail => {
- func.err_msg = emit.error_msg.?;
- return error.CodegenFail;
+ try cg.addTag(.end);
+ try cg.addTag(.dbg_epilogue_begin);
+
+ return .{
+ .mir_off = start_mir_off,
+ .mir_len = @intCast(wasm.mir_instructions.len - start_mir_off),
+ .mir_extra_off = cg.start_mir_extra_off,
+ .mir_extra_len = cg.extraLen(),
+ .locals_off = cg.start_locals_off,
+ .locals_len = @intCast(wasm.all_zcu_locals.items.len - cg.start_locals_off),
+ .prologue = if (cg.initial_stack_value == .none) .none else .{
+ .sp_local = cg.initial_stack_value.local.value,
+ .flags = .{ .stack_alignment = cg.stack_alignment },
+ .stack_size = cg.stack_size,
+ .bottom_stack_local = cg.bottom_stack_value.local.value,
},
- else => |e| return e,
};
}
const CallWValues = struct {
args: []WValue,
return_value: WValue,
+ local_index: u32,
fn deinit(values: *CallWValues, gpa: Allocator) void {
gpa.free(values.args);
@@ -1357,28 +1356,33 @@ const CallWValues = struct {
}
};
-fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWValues {
- const pt = func.pt;
- const zcu = pt.zcu;
+fn resolveCallingConventionValues(
+ zcu: *const Zcu,
+ fn_ty: Type,
+ target: *const std.Target,
+) Allocator.Error!CallWValues {
+ const gpa = zcu.gpa;
const ip = &zcu.intern_pool;
const fn_info = zcu.typeToFunc(fn_ty).?;
const cc = fn_info.cc;
+
var result: CallWValues = .{
.args = &.{},
.return_value = .none,
+ .local_index = 0,
};
if (cc == .naked) return result;
- var args = std.ArrayList(WValue).init(func.gpa);
+ var args = std.ArrayList(WValue).init(gpa);
defer args.deinit();
// Check if we store the result as a pointer to the stack rather than
// by value
- if (firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), pt, func.target.*)) {
+ if (firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), zcu, target)) {
// the sret arg will be passed as first argument, therefore we
// set the `return_value` before allocating locals for regular args.
- result.return_value = .{ .local = .{ .value = func.local_index, .references = 1 } };
- func.local_index += 1;
+ result.return_value = .{ .local = .{ .value = result.local_index, .references = 1 } };
+ result.local_index += 1;
}
switch (cc) {
@@ -1388,8 +1392,8 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV
continue;
}
- try args.append(.{ .local = .{ .value = func.local_index, .references = 1 } });
- func.local_index += 1;
+ try args.append(.{ .local = .{ .value = result.local_index, .references = 1 } });
+ result.local_index += 1;
}
},
.wasm_watc => {
@@ -1397,23 +1401,28 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV
const ty_classes = abi.classifyType(Type.fromInterned(ty), zcu);
for (ty_classes) |class| {
if (class == .none) continue;
- try args.append(.{ .local = .{ .value = func.local_index, .references = 1 } });
- func.local_index += 1;
+ try args.append(.{ .local = .{ .value = result.local_index, .references = 1 } });
+ result.local_index += 1;
}
}
},
- else => return func.fail("calling convention '{s}' not supported for Wasm", .{@tagName(cc)}),
+ else => unreachable, // Frontend is responsible for emitting an error earlier.
}
result.args = try args.toOwnedSlice();
return result;
}
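// A minimal standalone sketch of the local numbering established above: when
// the return value is passed via an sret pointer, wasm local 0 is reserved for
// that pointer and the declared parameters start at local 1. The test is
// illustrative and independent of the compiler's data structures.
const std = @import("std");

test "the sret pointer occupies local 0" {
    var param_locals: [2]u32 = undefined;
    var next: u32 = 0;
    const first_param_is_sret = true;
    if (first_param_is_sret) next += 1; // local 0 carries the pointer to the return slot
    for (&param_locals) |*local| {
        local.* = next;
        next += 1;
    }
    try std.testing.expectEqual(@as(u32, 1), param_locals[0]);
    try std.testing.expectEqual(@as(u32, 2), param_locals[1]);
}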
-fn firstParamSRet(cc: std.builtin.CallingConvention, return_type: Type, pt: Zcu.PerThread, target: std.Target) bool {
+pub fn firstParamSRet(
+ cc: std.builtin.CallingConvention,
+ return_type: Type,
+ zcu: *const Zcu,
+ target: *const std.Target,
+) bool {
switch (cc) {
.@"inline" => unreachable,
- .auto => return isByRef(return_type, pt, target),
+ .auto => return isByRef(return_type, zcu, target),
.wasm_watc => {
- const ty_classes = abi.classifyType(return_type, pt.zcu);
+ const ty_classes = abi.classifyType(return_type, zcu);
if (ty_classes[0] == .indirect) return true;
if (ty_classes[0] == .direct and ty_classes[1] == .direct) return true;
return false;
@@ -1424,94 +1433,88 @@ fn firstParamSRet(cc: std.builtin.CallingConvention, return_type: Type, pt: Zcu.
/// Lowers a Zig type and its value based on a given calling convention to ensure
/// it matches the ABI.
-fn lowerArg(func: *CodeGen, cc: std.builtin.CallingConvention, ty: Type, value: WValue) !void {
+fn lowerArg(cg: *CodeGen, cc: std.builtin.CallingConvention, ty: Type, value: WValue) !void {
if (cc != .wasm_watc) {
- return func.lowerToStack(value);
+ return cg.lowerToStack(value);
}
- const pt = func.pt;
- const zcu = pt.zcu;
+ const zcu = cg.pt.zcu;
const ty_classes = abi.classifyType(ty, zcu);
assert(ty_classes[0] != .none);
switch (ty.zigTypeTag(zcu)) {
.@"struct", .@"union" => {
if (ty_classes[0] == .indirect) {
- return func.lowerToStack(value);
+ return cg.lowerToStack(value);
}
assert(ty_classes[0] == .direct);
const scalar_type = abi.scalarType(ty, zcu);
switch (value) {
- .memory,
- .memory_offset,
- .stack_offset,
- => _ = try func.load(value, scalar_type, 0),
+ .nav_ref, .stack_offset => _ = try cg.load(value, scalar_type, 0),
.dead => unreachable,
- else => try func.emitWValue(value),
+ else => try cg.emitWValue(value),
}
},
.int, .float => {
if (ty_classes[1] == .none) {
- return func.lowerToStack(value);
+ return cg.lowerToStack(value);
}
assert(ty_classes[0] == .direct and ty_classes[1] == .direct);
assert(ty.abiSize(zcu) == 16);
// in this case we have an integer or float that must be lowered as 2 i64's.
- try func.emitWValue(value);
- try func.addMemArg(.i64_load, .{ .offset = value.offset(), .alignment = 8 });
- try func.emitWValue(value);
- try func.addMemArg(.i64_load, .{ .offset = value.offset() + 8, .alignment = 8 });
+ try cg.emitWValue(value);
+ try cg.addMemArg(.i64_load, .{ .offset = value.offset(), .alignment = 8 });
+ try cg.emitWValue(value);
+ try cg.addMemArg(.i64_load, .{ .offset = value.offset() + 8, .alignment = 8 });
},
- else => return func.lowerToStack(value),
+ else => return cg.lowerToStack(value),
}
}
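// A minimal standalone sketch of the i128/f128 case above: a 128-bit scalar is
// passed as two i64 loads, the low half at offset 0 and the high half at
// offset 8, matching wasm's little-endian memory layout.
const std = @import("std");

test "a 128-bit scalar splits into two little-endian 64-bit halves" {
    const value: u128 = 0x0123456789abcdef_fedcba9876543210;
    var bytes: [16]u8 = undefined;
    std.mem.writeInt(u128, &bytes, value, .little);

    const lo = std.mem.readInt(u64, bytes[0..8], .little);
    const hi = std.mem.readInt(u64, bytes[8..16], .little);
    try std.testing.expectEqual(@as(u64, 0xfedcba9876543210), lo);
    try std.testing.expectEqual(@as(u64, 0x0123456789abcdef), hi);
}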
 /// Lowers a `WValue` to the stack. This means that when `value` is a
 /// `.stack_offset`, we calculate the pointer for that offset and use it.
/// The value is left on the stack, and not stored in any temporary.
-fn lowerToStack(func: *CodeGen, value: WValue) !void {
+fn lowerToStack(cg: *CodeGen, value: WValue) !void {
switch (value) {
.stack_offset => |offset| {
- try func.emitWValue(value);
+ try cg.emitWValue(value);
if (offset.value > 0) {
- switch (func.arch()) {
+ switch (cg.ptr_size) {
.wasm32 => {
- try func.addImm32(offset.value);
- try func.addTag(.i32_add);
+ try cg.addImm32(offset.value);
+ try cg.addTag(.i32_add);
},
.wasm64 => {
- try func.addImm64(offset.value);
- try func.addTag(.i64_add);
+ try cg.addImm64(offset.value);
+ try cg.addTag(.i64_add);
},
- else => unreachable,
}
}
},
- else => try func.emitWValue(value),
+ else => try cg.emitWValue(value),
}
}
/// Creates a local for the initial stack value
/// Asserts `initial_stack_value` is `.none`
-fn initializeStack(func: *CodeGen) !void {
- assert(func.initial_stack_value == .none);
+fn initializeStack(cg: *CodeGen) !void {
+ assert(cg.initial_stack_value == .none);
// Reserve a local to store the current stack pointer
// We can later use this local to set the stack pointer back to the value
// we have stored here.
- func.initial_stack_value = try func.ensureAllocLocal(Type.usize);
+ cg.initial_stack_value = try cg.ensureAllocLocal(Type.usize);
// Also reserve a local to store the bottom stack value
- func.bottom_stack_value = try func.ensureAllocLocal(Type.usize);
+ cg.bottom_stack_value = try cg.ensureAllocLocal(Type.usize);
}
 /// Reads the stack pointer from `CodeGen.initial_stack_value` and writes it
/// to the global stack pointer variable
-fn restoreStackPointer(func: *CodeGen) !void {
+fn restoreStackPointer(cg: *CodeGen) !void {
// only restore the pointer if it was initialized
- if (func.initial_stack_value == .none) return;
+ if (cg.initial_stack_value == .none) return;
// Get the original stack pointer's value
- try func.emitWValue(func.initial_stack_value);
+ try cg.emitWValue(cg.initial_stack_value);
- // save its value in the global stack pointer
- try func.addLabel(.global_set, @intFromEnum(func.bin_file.zig_object.?.stack_pointer_sym));
+ try cg.addTag(.global_set_sp);
}
 /// From a given type, creates space on the virtual stack to store a value of that type.
@@ -1520,24 +1523,25 @@ fn restoreStackPointer(func: *CodeGen) !void {
/// moveStack unless a local was already created to store the pointer.
///
 /// Asserts the given `Type` has runtime bits.
-fn allocStack(func: *CodeGen, ty: Type) !WValue {
- const zcu = func.pt.zcu;
+fn allocStack(cg: *CodeGen, ty: Type) !WValue {
+ const pt = cg.pt;
+ const zcu = pt.zcu;
assert(ty.hasRuntimeBitsIgnoreComptime(zcu));
- if (func.initial_stack_value == .none) {
- try func.initializeStack();
+ if (cg.initial_stack_value == .none) {
+ try cg.initializeStack();
}
const abi_size = std.math.cast(u32, ty.abiSize(zcu)) orelse {
- return func.fail("Type {} with ABI size of {d} exceeds stack frame size", .{
- ty.fmt(func.pt), ty.abiSize(zcu),
+ return cg.fail("Type {} with ABI size of {d} exceeds stack frame size", .{
+ ty.fmt(pt), ty.abiSize(zcu),
});
};
const abi_align = ty.abiAlignment(zcu);
- func.stack_alignment = func.stack_alignment.max(abi_align);
+ cg.stack_alignment = cg.stack_alignment.max(abi_align);
- const offset: u32 = @intCast(abi_align.forward(func.stack_size));
- defer func.stack_size = offset + abi_size;
+ const offset: u32 = @intCast(abi_align.forward(cg.stack_size));
+ defer cg.stack_size = offset + abi_size;
return .{ .stack_offset = .{ .value = offset, .references = 1 } };
}
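// A minimal standalone sketch of the bookkeeping above: each allocation rounds
// the running frame size up to the value's alignment, uses that as the slot
// offset, then bumps the frame size by the ABI size. Sizes and alignments here
// are made up for illustration.
const std = @import("std");

test "stack slots are aligned and allocated in order" {
    var stack_size: u32 = 0;

    // a 1-byte value with alignment 1 lands at offset 0
    const a_off = std.mem.alignForward(u32, stack_size, 1);
    stack_size = a_off + 1;

    // an 8-byte value with alignment 8 starts at offset 8, not 1
    const b_off = std.mem.alignForward(u32, stack_size, 8);
    stack_size = b_off + 8;

    try std.testing.expectEqual(@as(u32, 0), a_off);
    try std.testing.expectEqual(@as(u32, 8), b_off);
    try std.testing.expectEqual(@as(u32, 16), stack_size);
}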
@@ -1546,30 +1550,30 @@ fn allocStack(func: *CodeGen, ty: Type) !WValue {
/// the value of its type will live.
 /// This is different from `allocStack` in that it will use the pointer's alignment,
 /// if it is set, to ensure the stack alignment is set correctly.
-fn allocStackPtr(func: *CodeGen, inst: Air.Inst.Index) !WValue {
- const pt = func.pt;
+fn allocStackPtr(cg: *CodeGen, inst: Air.Inst.Index) !WValue {
+ const pt = cg.pt;
const zcu = pt.zcu;
- const ptr_ty = func.typeOfIndex(inst);
+ const ptr_ty = cg.typeOfIndex(inst);
const pointee_ty = ptr_ty.childType(zcu);
- if (func.initial_stack_value == .none) {
- try func.initializeStack();
+ if (cg.initial_stack_value == .none) {
+ try cg.initializeStack();
}
if (!pointee_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
- return func.allocStack(Type.usize); // create a value containing just the stack pointer.
+ return cg.allocStack(Type.usize); // create a value containing just the stack pointer.
}
const abi_alignment = ptr_ty.ptrAlignment(zcu);
const abi_size = std.math.cast(u32, pointee_ty.abiSize(zcu)) orelse {
- return func.fail("Type {} with ABI size of {d} exceeds stack frame size", .{
+ return cg.fail("Type {} with ABI size of {d} exceeds stack frame size", .{
pointee_ty.fmt(pt), pointee_ty.abiSize(zcu),
});
};
- func.stack_alignment = func.stack_alignment.max(abi_alignment);
+ cg.stack_alignment = cg.stack_alignment.max(abi_alignment);
- const offset: u32 = @intCast(abi_alignment.forward(func.stack_size));
- defer func.stack_size = offset + abi_size;
+ const offset: u32 = @intCast(abi_alignment.forward(cg.stack_size));
+ defer cg.stack_size = offset + abi_size;
return .{ .stack_offset = .{ .value = offset, .references = 1 } };
}
@@ -1583,14 +1587,14 @@ fn toWasmBits(bits: u16) ?u16 {
 /// Performs a copy of bytes for a given type, copying all bytes
 /// from `src` to `dst`.
-fn memcpy(func: *CodeGen, dst: WValue, src: WValue, len: WValue) !void {
+fn memcpy(cg: *CodeGen, dst: WValue, src: WValue, len: WValue) !void {
// When bulk_memory is enabled, we lower it to wasm's memcpy instruction.
// If not, we lower it ourselves manually
- if (std.Target.wasm.featureSetHas(func.target.cpu.features, .bulk_memory)) {
- try func.lowerToStack(dst);
- try func.lowerToStack(src);
- try func.emitWValue(len);
- try func.addExtended(.memory_copy);
+ if (std.Target.wasm.featureSetHas(cg.target.cpu.features, .bulk_memory)) {
+ try cg.lowerToStack(dst);
+ try cg.lowerToStack(src);
+ try cg.emitWValue(len);
+ try cg.addExtended(.memory_copy);
return;
}
@@ -1611,19 +1615,18 @@ fn memcpy(func: *CodeGen, dst: WValue, src: WValue, len: WValue) !void {
const rhs_base = src.offset();
while (offset < length) : (offset += 1) {
// get dst's address to store the result
- try func.emitWValue(dst);
+ try cg.emitWValue(dst);
// load byte from src's address
- try func.emitWValue(src);
- switch (func.arch()) {
+ try cg.emitWValue(src);
+ switch (cg.ptr_size) {
.wasm32 => {
- try func.addMemArg(.i32_load8_u, .{ .offset = rhs_base + offset, .alignment = 1 });
- try func.addMemArg(.i32_store8, .{ .offset = lhs_base + offset, .alignment = 1 });
+ try cg.addMemArg(.i32_load8_u, .{ .offset = rhs_base + offset, .alignment = 1 });
+ try cg.addMemArg(.i32_store8, .{ .offset = lhs_base + offset, .alignment = 1 });
},
.wasm64 => {
- try func.addMemArg(.i64_load8_u, .{ .offset = rhs_base + offset, .alignment = 1 });
- try func.addMemArg(.i64_store8, .{ .offset = lhs_base + offset, .alignment = 1 });
+ try cg.addMemArg(.i64_load8_u, .{ .offset = rhs_base + offset, .alignment = 1 });
+ try cg.addMemArg(.i64_store8, .{ .offset = lhs_base + offset, .alignment = 1 });
},
- else => unreachable,
}
}
return;
@@ -1633,94 +1636,84 @@ fn memcpy(func: *CodeGen, dst: WValue, src: WValue, len: WValue) !void {
// allocate a local for the offset, and set it to 0.
     // This is to ensure that inside loops we correctly reset the counter.
- var offset = try func.allocLocal(Type.usize); // local for counter
- defer offset.free(func);
- switch (func.arch()) {
- .wasm32 => try func.addImm32(0),
- .wasm64 => try func.addImm64(0),
- else => unreachable,
+ var offset = try cg.allocLocal(Type.usize); // local for counter
+ defer offset.free(cg);
+ switch (cg.ptr_size) {
+ .wasm32 => try cg.addImm32(0),
+ .wasm64 => try cg.addImm64(0),
}
- try func.addLabel(.local_set, offset.local.value);
+ try cg.addLocal(.local_set, offset.local.value);
// outer block to jump to when loop is done
- try func.startBlock(.block, wasm.block_empty);
- try func.startBlock(.loop, wasm.block_empty);
+ try cg.startBlock(.block, .empty);
+ try cg.startBlock(.loop, .empty);
// loop condition (offset == length -> break)
{
- try func.emitWValue(offset);
- try func.emitWValue(len);
- switch (func.arch()) {
- .wasm32 => try func.addTag(.i32_eq),
- .wasm64 => try func.addTag(.i64_eq),
- else => unreachable,
+ try cg.emitWValue(offset);
+ try cg.emitWValue(len);
+ switch (cg.ptr_size) {
+ .wasm32 => try cg.addTag(.i32_eq),
+ .wasm64 => try cg.addTag(.i64_eq),
}
- try func.addLabel(.br_if, 1); // jump out of loop into outer block (finished)
+ try cg.addLabel(.br_if, 1); // jump out of loop into outer block (finished)
}
// get dst ptr
{
- try func.emitWValue(dst);
- try func.emitWValue(offset);
- switch (func.arch()) {
- .wasm32 => try func.addTag(.i32_add),
- .wasm64 => try func.addTag(.i64_add),
- else => unreachable,
+ try cg.emitWValue(dst);
+ try cg.emitWValue(offset);
+ switch (cg.ptr_size) {
+ .wasm32 => try cg.addTag(.i32_add),
+ .wasm64 => try cg.addTag(.i64_add),
}
}
// get src value and also store in dst
{
- try func.emitWValue(src);
- try func.emitWValue(offset);
- switch (func.arch()) {
+ try cg.emitWValue(src);
+ try cg.emitWValue(offset);
+ switch (cg.ptr_size) {
.wasm32 => {
- try func.addTag(.i32_add);
- try func.addMemArg(.i32_load8_u, .{ .offset = src.offset(), .alignment = 1 });
- try func.addMemArg(.i32_store8, .{ .offset = dst.offset(), .alignment = 1 });
+ try cg.addTag(.i32_add);
+ try cg.addMemArg(.i32_load8_u, .{ .offset = src.offset(), .alignment = 1 });
+ try cg.addMemArg(.i32_store8, .{ .offset = dst.offset(), .alignment = 1 });
},
.wasm64 => {
- try func.addTag(.i64_add);
- try func.addMemArg(.i64_load8_u, .{ .offset = src.offset(), .alignment = 1 });
- try func.addMemArg(.i64_store8, .{ .offset = dst.offset(), .alignment = 1 });
+ try cg.addTag(.i64_add);
+ try cg.addMemArg(.i64_load8_u, .{ .offset = src.offset(), .alignment = 1 });
+ try cg.addMemArg(.i64_store8, .{ .offset = dst.offset(), .alignment = 1 });
},
- else => unreachable,
}
}
// increment loop counter
{
- try func.emitWValue(offset);
- switch (func.arch()) {
+ try cg.emitWValue(offset);
+ switch (cg.ptr_size) {
.wasm32 => {
- try func.addImm32(1);
- try func.addTag(.i32_add);
+ try cg.addImm32(1);
+ try cg.addTag(.i32_add);
},
.wasm64 => {
- try func.addImm64(1);
- try func.addTag(.i64_add);
+ try cg.addImm64(1);
+ try cg.addTag(.i64_add);
},
- else => unreachable,
}
- try func.addLabel(.local_set, offset.local.value);
- try func.addLabel(.br, 0); // jump to start of loop
+ try cg.addLocal(.local_set, offset.local.value);
+ try cg.addLabel(.br, 0); // jump to start of loop
}
- try func.endBlock(); // close off loop block
- try func.endBlock(); // close off outer block
-}
-
-fn ptrSize(func: *const CodeGen) u16 {
- return @divExact(func.target.ptrBitWidth(), 8);
+ try cg.endBlock(); // close off loop block
+ try cg.endBlock(); // close off outer block
}
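// A minimal standalone sketch of the fallback loop emitted above when the
// bulk-memory feature is unavailable and the length is only known at run time:
// a counter starts at zero, the loop exits once it equals `len`, and one byte
// is moved per iteration. `byteCopy` is an illustrative name only.
const std = @import("std");

fn byteCopy(dst: [*]u8, src: [*]const u8, len: usize) void {
    var offset: usize = 0;
    while (offset != len) : (offset += 1) {
        dst[offset] = src[offset];
    }
}

test "byte-wise copy loop" {
    const src: []const u8 = "wasm";
    var dst: [4]u8 = undefined;
    byteCopy(&dst, src.ptr, src.len);
    try std.testing.expectEqualSlices(u8, "wasm", &dst);
}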
-fn arch(func: *const CodeGen) std.Target.Cpu.Arch {
- return func.target.cpu.arch;
+fn ptrSize(cg: *const CodeGen) u16 {
+ return @divExact(cg.target.ptrBitWidth(), 8);
}
/// For a given `Type`, will return true when the type will be passed
/// by reference, rather than by value
-fn isByRef(ty: Type, pt: Zcu.PerThread, target: std.Target) bool {
- const zcu = pt.zcu;
+fn isByRef(ty: Type, zcu: *const Zcu, target: *const std.Target) bool {
const ip = &zcu.intern_pool;
switch (ty.zigTypeTag(zcu)) {
.type,
@@ -1753,14 +1746,14 @@ fn isByRef(ty: Type, pt: Zcu.PerThread, target: std.Target) bool {
},
.@"struct" => {
if (zcu.typeToPackedStruct(ty)) |packed_struct| {
- return isByRef(Type.fromInterned(packed_struct.backingIntTypeUnordered(ip)), pt, target);
+ return isByRef(Type.fromInterned(packed_struct.backingIntTypeUnordered(ip)), zcu, target);
}
return ty.hasRuntimeBitsIgnoreComptime(zcu);
},
.vector => return determineSimdStoreStrategy(ty, zcu, target) == .unrolled,
.int => return ty.intInfo(zcu).bits > 64,
.@"enum" => return ty.intInfo(zcu).bits > 64,
- .float => return ty.floatBits(target) > 64,
+ .float => return ty.floatBits(target.*) > 64,
.error_union => {
const pl_ty = ty.errorUnionPayload(zcu);
if (!pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
@@ -1791,8 +1784,8 @@ const SimdStoreStrategy = enum {
 /// This means that when a given type is 128 bits and either the simd128 or relaxed-simd
 /// feature is enabled, the function will return `.direct`. This allows storing
 /// it with a single instruction, rather than an unrolled version.
-fn determineSimdStoreStrategy(ty: Type, zcu: *Zcu, target: std.Target) SimdStoreStrategy {
- std.debug.assert(ty.zigTypeTag(zcu) == .vector);
+pub fn determineSimdStoreStrategy(ty: Type, zcu: *const Zcu, target: *const std.Target) SimdStoreStrategy {
+ assert(ty.zigTypeTag(zcu) == .vector);
if (ty.bitSize(zcu) != 128) return .unrolled;
const hasFeature = std.Target.wasm.featureSetHas;
const features = target.cpu.features;
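// A minimal standalone sketch of the rule implemented above: only 128-bit
// vectors may use a single v128 store, and only when the simd128 (or
// relaxed-simd) feature is enabled; everything else is stored element by
// element. `simdStoreStrategy` is an illustrative name only.
const std = @import("std");

const Strategy = enum { direct, unrolled };

fn simdStoreStrategy(bit_size: u64, has_simd128: bool) Strategy {
    if (bit_size == 128 and has_simd128) return .direct;
    return .unrolled;
}

test "only 128-bit vectors with simd128 get a direct store" {
    try std.testing.expectEqual(Strategy.direct, simdStoreStrategy(128, true));
    try std.testing.expectEqual(Strategy.unrolled, simdStoreStrategy(128, false));
    try std.testing.expectEqual(Strategy.unrolled, simdStoreStrategy(256, true));
}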
@@ -1806,215 +1799,214 @@ fn determineSimdStoreStrategy(ty: Type, zcu: *Zcu, target: std.Target) SimdStore
/// This can be used to get a pointer to a struct field, error payload, etc.
/// By providing `modify` as action, it will modify the given `ptr_value` instead of making a new
/// local value to store the pointer. This allows for local re-use and improves binary size.
-fn buildPointerOffset(func: *CodeGen, ptr_value: WValue, offset: u64, action: enum { modify, new }) InnerError!WValue {
+fn buildPointerOffset(cg: *CodeGen, ptr_value: WValue, offset: u64, action: enum { modify, new }) InnerError!WValue {
// do not perform arithmetic when offset is 0.
if (offset == 0 and ptr_value.offset() == 0 and action == .modify) return ptr_value;
const result_ptr: WValue = switch (action) {
- .new => try func.ensureAllocLocal(Type.usize),
+ .new => try cg.ensureAllocLocal(Type.usize),
.modify => ptr_value,
};
- try func.emitWValue(ptr_value);
+ try cg.emitWValue(ptr_value);
if (offset + ptr_value.offset() > 0) {
- switch (func.arch()) {
+ switch (cg.ptr_size) {
.wasm32 => {
- try func.addImm32(@intCast(offset + ptr_value.offset()));
- try func.addTag(.i32_add);
+ try cg.addImm32(@intCast(offset + ptr_value.offset()));
+ try cg.addTag(.i32_add);
},
.wasm64 => {
- try func.addImm64(offset + ptr_value.offset());
- try func.addTag(.i64_add);
+ try cg.addImm64(offset + ptr_value.offset());
+ try cg.addTag(.i64_add);
},
- else => unreachable,
}
}
- try func.addLabel(.local_set, result_ptr.local.value);
+ try cg.addLocal(.local_set, result_ptr.local.value);
return result_ptr;
}
-fn genInst(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const air_tags = func.air.instructions.items(.tag);
+fn genInst(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const air_tags = cg.air.instructions.items(.tag);
return switch (air_tags[@intFromEnum(inst)]) {
.inferred_alloc, .inferred_alloc_comptime => unreachable,
- .add => func.airBinOp(inst, .add),
- .add_sat => func.airSatBinOp(inst, .add),
- .add_wrap => func.airWrapBinOp(inst, .add),
- .sub => func.airBinOp(inst, .sub),
- .sub_sat => func.airSatBinOp(inst, .sub),
- .sub_wrap => func.airWrapBinOp(inst, .sub),
- .mul => func.airBinOp(inst, .mul),
- .mul_sat => func.airSatMul(inst),
- .mul_wrap => func.airWrapBinOp(inst, .mul),
- .div_float, .div_exact => func.airDiv(inst),
- .div_trunc => func.airDivTrunc(inst),
- .div_floor => func.airDivFloor(inst),
- .bit_and => func.airBinOp(inst, .@"and"),
- .bit_or => func.airBinOp(inst, .@"or"),
- .bool_and => func.airBinOp(inst, .@"and"),
- .bool_or => func.airBinOp(inst, .@"or"),
- .rem => func.airRem(inst),
- .mod => func.airMod(inst),
- .shl => func.airWrapBinOp(inst, .shl),
- .shl_exact => func.airBinOp(inst, .shl),
- .shl_sat => func.airShlSat(inst),
- .shr, .shr_exact => func.airBinOp(inst, .shr),
- .xor => func.airBinOp(inst, .xor),
- .max => func.airMaxMin(inst, .max),
- .min => func.airMaxMin(inst, .min),
- .mul_add => func.airMulAdd(inst),
-
- .sqrt => func.airUnaryFloatOp(inst, .sqrt),
- .sin => func.airUnaryFloatOp(inst, .sin),
- .cos => func.airUnaryFloatOp(inst, .cos),
- .tan => func.airUnaryFloatOp(inst, .tan),
- .exp => func.airUnaryFloatOp(inst, .exp),
- .exp2 => func.airUnaryFloatOp(inst, .exp2),
- .log => func.airUnaryFloatOp(inst, .log),
- .log2 => func.airUnaryFloatOp(inst, .log2),
- .log10 => func.airUnaryFloatOp(inst, .log10),
- .floor => func.airUnaryFloatOp(inst, .floor),
- .ceil => func.airUnaryFloatOp(inst, .ceil),
- .round => func.airUnaryFloatOp(inst, .round),
- .trunc_float => func.airUnaryFloatOp(inst, .trunc),
- .neg => func.airUnaryFloatOp(inst, .neg),
-
- .abs => func.airAbs(inst),
-
- .add_with_overflow => func.airAddSubWithOverflow(inst, .add),
- .sub_with_overflow => func.airAddSubWithOverflow(inst, .sub),
- .shl_with_overflow => func.airShlWithOverflow(inst),
- .mul_with_overflow => func.airMulWithOverflow(inst),
-
- .clz => func.airClz(inst),
- .ctz => func.airCtz(inst),
-
- .cmp_eq => func.airCmp(inst, .eq),
- .cmp_gte => func.airCmp(inst, .gte),
- .cmp_gt => func.airCmp(inst, .gt),
- .cmp_lte => func.airCmp(inst, .lte),
- .cmp_lt => func.airCmp(inst, .lt),
- .cmp_neq => func.airCmp(inst, .neq),
-
- .cmp_vector => func.airCmpVector(inst),
- .cmp_lt_errors_len => func.airCmpLtErrorsLen(inst),
-
- .array_elem_val => func.airArrayElemVal(inst),
- .array_to_slice => func.airArrayToSlice(inst),
- .alloc => func.airAlloc(inst),
- .arg => func.airArg(inst),
- .bitcast => func.airBitcast(inst),
- .block => func.airBlock(inst),
- .trap => func.airTrap(inst),
- .breakpoint => func.airBreakpoint(inst),
- .br => func.airBr(inst),
- .repeat => func.airRepeat(inst),
- .switch_dispatch => return func.fail("TODO implement `switch_dispatch`", .{}),
- .int_from_bool => func.airIntFromBool(inst),
- .cond_br => func.airCondBr(inst),
- .intcast => func.airIntcast(inst),
- .fptrunc => func.airFptrunc(inst),
- .fpext => func.airFpext(inst),
- .int_from_float => func.airIntFromFloat(inst),
- .float_from_int => func.airFloatFromInt(inst),
- .get_union_tag => func.airGetUnionTag(inst),
-
- .@"try" => func.airTry(inst),
- .try_cold => func.airTry(inst),
- .try_ptr => func.airTryPtr(inst),
- .try_ptr_cold => func.airTryPtr(inst),
-
- .dbg_stmt => func.airDbgStmt(inst),
- .dbg_empty_stmt => try func.finishAir(inst, .none, &.{}),
- .dbg_inline_block => func.airDbgInlineBlock(inst),
- .dbg_var_ptr => func.airDbgVar(inst, .local_var, true),
- .dbg_var_val => func.airDbgVar(inst, .local_var, false),
- .dbg_arg_inline => func.airDbgVar(inst, .local_arg, false),
-
- .call => func.airCall(inst, .auto),
- .call_always_tail => func.airCall(inst, .always_tail),
- .call_never_tail => func.airCall(inst, .never_tail),
- .call_never_inline => func.airCall(inst, .never_inline),
-
- .is_err => func.airIsErr(inst, .i32_ne),
- .is_non_err => func.airIsErr(inst, .i32_eq),
-
- .is_null => func.airIsNull(inst, .i32_eq, .value),
- .is_non_null => func.airIsNull(inst, .i32_ne, .value),
- .is_null_ptr => func.airIsNull(inst, .i32_eq, .ptr),
- .is_non_null_ptr => func.airIsNull(inst, .i32_ne, .ptr),
-
- .load => func.airLoad(inst),
- .loop => func.airLoop(inst),
- .memset => func.airMemset(inst, false),
- .memset_safe => func.airMemset(inst, true),
- .not => func.airNot(inst),
- .optional_payload => func.airOptionalPayload(inst),
- .optional_payload_ptr => func.airOptionalPayloadPtr(inst),
- .optional_payload_ptr_set => func.airOptionalPayloadPtrSet(inst),
- .ptr_add => func.airPtrBinOp(inst, .add),
- .ptr_sub => func.airPtrBinOp(inst, .sub),
- .ptr_elem_ptr => func.airPtrElemPtr(inst),
- .ptr_elem_val => func.airPtrElemVal(inst),
- .int_from_ptr => func.airIntFromPtr(inst),
- .ret => func.airRet(inst),
- .ret_safe => func.airRet(inst), // TODO
- .ret_ptr => func.airRetPtr(inst),
- .ret_load => func.airRetLoad(inst),
- .splat => func.airSplat(inst),
- .select => func.airSelect(inst),
- .shuffle => func.airShuffle(inst),
- .reduce => func.airReduce(inst),
- .aggregate_init => func.airAggregateInit(inst),
- .union_init => func.airUnionInit(inst),
- .prefetch => func.airPrefetch(inst),
- .popcount => func.airPopcount(inst),
- .byte_swap => func.airByteSwap(inst),
- .bit_reverse => func.airBitReverse(inst),
-
- .slice => func.airSlice(inst),
- .slice_len => func.airSliceLen(inst),
- .slice_elem_val => func.airSliceElemVal(inst),
- .slice_elem_ptr => func.airSliceElemPtr(inst),
- .slice_ptr => func.airSlicePtr(inst),
- .ptr_slice_len_ptr => func.airPtrSliceFieldPtr(inst, func.ptrSize()),
- .ptr_slice_ptr_ptr => func.airPtrSliceFieldPtr(inst, 0),
- .store => func.airStore(inst, false),
- .store_safe => func.airStore(inst, true),
-
- .set_union_tag => func.airSetUnionTag(inst),
- .struct_field_ptr => func.airStructFieldPtr(inst),
- .struct_field_ptr_index_0 => func.airStructFieldPtrIndex(inst, 0),
- .struct_field_ptr_index_1 => func.airStructFieldPtrIndex(inst, 1),
- .struct_field_ptr_index_2 => func.airStructFieldPtrIndex(inst, 2),
- .struct_field_ptr_index_3 => func.airStructFieldPtrIndex(inst, 3),
- .struct_field_val => func.airStructFieldVal(inst),
- .field_parent_ptr => func.airFieldParentPtr(inst),
-
- .switch_br => func.airSwitchBr(inst),
- .loop_switch_br => return func.fail("TODO implement `loop_switch_br`", .{}),
- .trunc => func.airTrunc(inst),
- .unreach => func.airUnreachable(inst),
-
- .wrap_optional => func.airWrapOptional(inst),
- .unwrap_errunion_payload => func.airUnwrapErrUnionPayload(inst, false),
- .unwrap_errunion_payload_ptr => func.airUnwrapErrUnionPayload(inst, true),
- .unwrap_errunion_err => func.airUnwrapErrUnionError(inst, false),
- .unwrap_errunion_err_ptr => func.airUnwrapErrUnionError(inst, true),
- .wrap_errunion_payload => func.airWrapErrUnionPayload(inst),
- .wrap_errunion_err => func.airWrapErrUnionErr(inst),
- .errunion_payload_ptr_set => func.airErrUnionPayloadPtrSet(inst),
- .error_name => func.airErrorName(inst),
-
- .wasm_memory_size => func.airWasmMemorySize(inst),
- .wasm_memory_grow => func.airWasmMemoryGrow(inst),
-
- .memcpy => func.airMemcpy(inst),
-
- .ret_addr => func.airRetAddr(inst),
- .tag_name => func.airTagName(inst),
-
- .error_set_has_value => func.airErrorSetHasValue(inst),
- .frame_addr => func.airFrameAddress(inst),
+ .add => cg.airBinOp(inst, .add),
+ .add_sat => cg.airSatBinOp(inst, .add),
+ .add_wrap => cg.airWrapBinOp(inst, .add),
+ .sub => cg.airBinOp(inst, .sub),
+ .sub_sat => cg.airSatBinOp(inst, .sub),
+ .sub_wrap => cg.airWrapBinOp(inst, .sub),
+ .mul => cg.airBinOp(inst, .mul),
+ .mul_sat => cg.airSatMul(inst),
+ .mul_wrap => cg.airWrapBinOp(inst, .mul),
+ .div_float, .div_exact => cg.airDiv(inst),
+ .div_trunc => cg.airDivTrunc(inst),
+ .div_floor => cg.airDivFloor(inst),
+ .bit_and => cg.airBinOp(inst, .@"and"),
+ .bit_or => cg.airBinOp(inst, .@"or"),
+ .bool_and => cg.airBinOp(inst, .@"and"),
+ .bool_or => cg.airBinOp(inst, .@"or"),
+ .rem => cg.airRem(inst),
+ .mod => cg.airMod(inst),
+ .shl => cg.airWrapBinOp(inst, .shl),
+ .shl_exact => cg.airBinOp(inst, .shl),
+ .shl_sat => cg.airShlSat(inst),
+ .shr, .shr_exact => cg.airBinOp(inst, .shr),
+ .xor => cg.airBinOp(inst, .xor),
+ .max => cg.airMaxMin(inst, .fmax, .gt),
+ .min => cg.airMaxMin(inst, .fmin, .lt),
+ .mul_add => cg.airMulAdd(inst),
+
+ .sqrt => cg.airUnaryFloatOp(inst, .sqrt),
+ .sin => cg.airUnaryFloatOp(inst, .sin),
+ .cos => cg.airUnaryFloatOp(inst, .cos),
+ .tan => cg.airUnaryFloatOp(inst, .tan),
+ .exp => cg.airUnaryFloatOp(inst, .exp),
+ .exp2 => cg.airUnaryFloatOp(inst, .exp2),
+ .log => cg.airUnaryFloatOp(inst, .log),
+ .log2 => cg.airUnaryFloatOp(inst, .log2),
+ .log10 => cg.airUnaryFloatOp(inst, .log10),
+ .floor => cg.airUnaryFloatOp(inst, .floor),
+ .ceil => cg.airUnaryFloatOp(inst, .ceil),
+ .round => cg.airUnaryFloatOp(inst, .round),
+ .trunc_float => cg.airUnaryFloatOp(inst, .trunc),
+ .neg => cg.airUnaryFloatOp(inst, .neg),
+
+ .abs => cg.airAbs(inst),
+
+ .add_with_overflow => cg.airAddSubWithOverflow(inst, .add),
+ .sub_with_overflow => cg.airAddSubWithOverflow(inst, .sub),
+ .shl_with_overflow => cg.airShlWithOverflow(inst),
+ .mul_with_overflow => cg.airMulWithOverflow(inst),
+
+ .clz => cg.airClz(inst),
+ .ctz => cg.airCtz(inst),
+
+ .cmp_eq => cg.airCmp(inst, .eq),
+ .cmp_gte => cg.airCmp(inst, .gte),
+ .cmp_gt => cg.airCmp(inst, .gt),
+ .cmp_lte => cg.airCmp(inst, .lte),
+ .cmp_lt => cg.airCmp(inst, .lt),
+ .cmp_neq => cg.airCmp(inst, .neq),
+
+ .cmp_vector => cg.airCmpVector(inst),
+ .cmp_lt_errors_len => cg.airCmpLtErrorsLen(inst),
+
+ .array_elem_val => cg.airArrayElemVal(inst),
+ .array_to_slice => cg.airArrayToSlice(inst),
+ .alloc => cg.airAlloc(inst),
+ .arg => cg.airArg(inst),
+ .bitcast => cg.airBitcast(inst),
+ .block => cg.airBlock(inst),
+ .trap => cg.airTrap(inst),
+ .breakpoint => cg.airBreakpoint(inst),
+ .br => cg.airBr(inst),
+ .repeat => cg.airRepeat(inst),
+ .switch_dispatch => return cg.fail("TODO implement `switch_dispatch`", .{}),
+ .int_from_bool => cg.airIntFromBool(inst),
+ .cond_br => cg.airCondBr(inst),
+ .intcast => cg.airIntcast(inst),
+ .fptrunc => cg.airFptrunc(inst),
+ .fpext => cg.airFpext(inst),
+ .int_from_float => cg.airIntFromFloat(inst),
+ .float_from_int => cg.airFloatFromInt(inst),
+ .get_union_tag => cg.airGetUnionTag(inst),
+
+ .@"try" => cg.airTry(inst),
+ .try_cold => cg.airTry(inst),
+ .try_ptr => cg.airTryPtr(inst),
+ .try_ptr_cold => cg.airTryPtr(inst),
+
+ .dbg_stmt => cg.airDbgStmt(inst),
+ .dbg_empty_stmt => try cg.finishAir(inst, .none, &.{}),
+ .dbg_inline_block => cg.airDbgInlineBlock(inst),
+ .dbg_var_ptr => cg.airDbgVar(inst, .local_var, true),
+ .dbg_var_val => cg.airDbgVar(inst, .local_var, false),
+ .dbg_arg_inline => cg.airDbgVar(inst, .local_arg, false),
+
+ .call => cg.airCall(inst, .auto),
+ .call_always_tail => cg.airCall(inst, .always_tail),
+ .call_never_tail => cg.airCall(inst, .never_tail),
+ .call_never_inline => cg.airCall(inst, .never_inline),
+
+ .is_err => cg.airIsErr(inst, .i32_ne),
+ .is_non_err => cg.airIsErr(inst, .i32_eq),
+
+ .is_null => cg.airIsNull(inst, .i32_eq, .value),
+ .is_non_null => cg.airIsNull(inst, .i32_ne, .value),
+ .is_null_ptr => cg.airIsNull(inst, .i32_eq, .ptr),
+ .is_non_null_ptr => cg.airIsNull(inst, .i32_ne, .ptr),
+
+ .load => cg.airLoad(inst),
+ .loop => cg.airLoop(inst),
+ .memset => cg.airMemset(inst, false),
+ .memset_safe => cg.airMemset(inst, true),
+ .not => cg.airNot(inst),
+ .optional_payload => cg.airOptionalPayload(inst),
+ .optional_payload_ptr => cg.airOptionalPayloadPtr(inst),
+ .optional_payload_ptr_set => cg.airOptionalPayloadPtrSet(inst),
+ .ptr_add => cg.airPtrBinOp(inst, .add),
+ .ptr_sub => cg.airPtrBinOp(inst, .sub),
+ .ptr_elem_ptr => cg.airPtrElemPtr(inst),
+ .ptr_elem_val => cg.airPtrElemVal(inst),
+ .int_from_ptr => cg.airIntFromPtr(inst),
+ .ret => cg.airRet(inst),
+ .ret_safe => cg.airRet(inst), // TODO
+ .ret_ptr => cg.airRetPtr(inst),
+ .ret_load => cg.airRetLoad(inst),
+ .splat => cg.airSplat(inst),
+ .select => cg.airSelect(inst),
+ .shuffle => cg.airShuffle(inst),
+ .reduce => cg.airReduce(inst),
+ .aggregate_init => cg.airAggregateInit(inst),
+ .union_init => cg.airUnionInit(inst),
+ .prefetch => cg.airPrefetch(inst),
+ .popcount => cg.airPopcount(inst),
+ .byte_swap => cg.airByteSwap(inst),
+ .bit_reverse => cg.airBitReverse(inst),
+
+ .slice => cg.airSlice(inst),
+ .slice_len => cg.airSliceLen(inst),
+ .slice_elem_val => cg.airSliceElemVal(inst),
+ .slice_elem_ptr => cg.airSliceElemPtr(inst),
+ .slice_ptr => cg.airSlicePtr(inst),
+ .ptr_slice_len_ptr => cg.airPtrSliceFieldPtr(inst, cg.ptrSize()),
+ .ptr_slice_ptr_ptr => cg.airPtrSliceFieldPtr(inst, 0),
+ .store => cg.airStore(inst, false),
+ .store_safe => cg.airStore(inst, true),
+
+ .set_union_tag => cg.airSetUnionTag(inst),
+ .struct_field_ptr => cg.airStructFieldPtr(inst),
+ .struct_field_ptr_index_0 => cg.airStructFieldPtrIndex(inst, 0),
+ .struct_field_ptr_index_1 => cg.airStructFieldPtrIndex(inst, 1),
+ .struct_field_ptr_index_2 => cg.airStructFieldPtrIndex(inst, 2),
+ .struct_field_ptr_index_3 => cg.airStructFieldPtrIndex(inst, 3),
+ .struct_field_val => cg.airStructFieldVal(inst),
+ .field_parent_ptr => cg.airFieldParentPtr(inst),
+
+ .switch_br => cg.airSwitchBr(inst),
+ .loop_switch_br => return cg.fail("TODO implement `loop_switch_br`", .{}),
+ .trunc => cg.airTrunc(inst),
+ .unreach => cg.airUnreachable(inst),
+
+ .wrap_optional => cg.airWrapOptional(inst),
+ .unwrap_errunion_payload => cg.airUnwrapErrUnionPayload(inst, false),
+ .unwrap_errunion_payload_ptr => cg.airUnwrapErrUnionPayload(inst, true),
+ .unwrap_errunion_err => cg.airUnwrapErrUnionError(inst, false),
+ .unwrap_errunion_err_ptr => cg.airUnwrapErrUnionError(inst, true),
+ .wrap_errunion_payload => cg.airWrapErrUnionPayload(inst),
+ .wrap_errunion_err => cg.airWrapErrUnionErr(inst),
+ .errunion_payload_ptr_set => cg.airErrUnionPayloadPtrSet(inst),
+ .error_name => cg.airErrorName(inst),
+
+ .wasm_memory_size => cg.airWasmMemorySize(inst),
+ .wasm_memory_grow => cg.airWasmMemoryGrow(inst),
+
+ .memcpy => cg.airMemcpy(inst),
+
+ .ret_addr => cg.airRetAddr(inst),
+ .tag_name => cg.airTagName(inst),
+
+ .error_set_has_value => cg.airErrorSetHasValue(inst),
+ .frame_addr => cg.airFrameAddress(inst),
.assembly,
.is_err_ptr,
@@ -2030,18 +2022,18 @@ fn genInst(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
.c_va_copy,
.c_va_end,
.c_va_start,
- => |tag| return func.fail("TODO: Implement wasm inst: {s}", .{@tagName(tag)}),
+ => |tag| return cg.fail("TODO: Implement wasm inst: {s}", .{@tagName(tag)}),
- .atomic_load => func.airAtomicLoad(inst),
+ .atomic_load => cg.airAtomicLoad(inst),
.atomic_store_unordered,
.atomic_store_monotonic,
.atomic_store_release,
.atomic_store_seq_cst,
// in WebAssembly, all atomic instructions are sequentially ordered.
- => func.airAtomicStore(inst),
- .atomic_rmw => func.airAtomicRmw(inst),
- .cmpxchg_weak => func.airCmpxchg(inst),
- .cmpxchg_strong => func.airCmpxchg(inst),
+ => cg.airAtomicStore(inst),
+ .atomic_rmw => cg.airAtomicRmw(inst),
+ .cmpxchg_weak => cg.airCmpxchg(inst),
+ .cmpxchg_strong => cg.airCmpxchg(inst),
.add_optimized,
.sub_optimized,
@@ -2062,12 +2054,12 @@ fn genInst(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
.cmp_vector_optimized,
.reduce_optimized,
.int_from_float_optimized,
- => return func.fail("TODO implement optimized float mode", .{}),
+ => return cg.fail("TODO implement optimized float mode", .{}),
.add_safe,
.sub_safe,
.mul_safe,
- => return func.fail("TODO implement safety_checked_instructions", .{}),
+ => return cg.fail("TODO implement safety_checked_instructions", .{}),
.work_item_id,
.work_group_size,
@@ -2076,123 +2068,120 @@ fn genInst(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
};
}
-fn genBody(func: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
- const pt = func.pt;
- const zcu = pt.zcu;
+fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
+ const zcu = cg.pt.zcu;
const ip = &zcu.intern_pool;
for (body) |inst| {
- if (func.liveness.isUnused(inst) and !func.air.mustLower(inst, ip)) {
+ if (cg.liveness.isUnused(inst) and !cg.air.mustLower(inst, ip)) {
continue;
}
- const old_bookkeeping_value = func.air_bookkeeping;
- try func.currentBranch().values.ensureUnusedCapacity(func.gpa, Liveness.bpi);
- try func.genInst(inst);
+ const old_bookkeeping_value = cg.air_bookkeeping;
+ try cg.currentBranch().values.ensureUnusedCapacity(cg.gpa, Liveness.bpi);
+ try cg.genInst(inst);
- if (std.debug.runtime_safety and func.air_bookkeeping < old_bookkeeping_value + 1) {
+ if (std.debug.runtime_safety and cg.air_bookkeeping < old_bookkeeping_value + 1) {
std.debug.panic("Missing call to `finishAir` in AIR instruction %{d} ('{}')", .{
inst,
- func.air.instructions.items(.tag)[@intFromEnum(inst)],
+ cg.air.instructions.items(.tag)[@intFromEnum(inst)],
});
}
}
}
-fn airRet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const pt = func.pt;
- const zcu = pt.zcu;
- const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
- const operand = try func.resolveInst(un_op);
- const fn_info = zcu.typeToFunc(zcu.navValue(func.owner_nav).typeOf(zcu)).?;
+fn airRet(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const zcu = cg.pt.zcu;
+ const un_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
+ const operand = try cg.resolveInst(un_op);
+ const fn_info = zcu.typeToFunc(zcu.navValue(cg.owner_nav).typeOf(zcu)).?;
const ret_ty = Type.fromInterned(fn_info.return_type);
// result must be stored in the stack and we return a pointer
// to the stack instead
- if (func.return_value != .none) {
- try func.store(func.return_value, operand, ret_ty, 0);
+ if (cg.return_value != .none) {
+ try cg.store(cg.return_value, operand, ret_ty, 0);
} else if (fn_info.cc == .wasm_watc and ret_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
switch (ret_ty.zigTypeTag(zcu)) {
// Aggregate types can be lowered as a singular value
.@"struct", .@"union" => {
const scalar_type = abi.scalarType(ret_ty, zcu);
- try func.emitWValue(operand);
+ try cg.emitWValue(operand);
const opcode = buildOpcode(.{
.op = .load,
.width = @as(u8, @intCast(scalar_type.abiSize(zcu) * 8)),
.signedness = if (scalar_type.isSignedInt(zcu)) .signed else .unsigned,
- .valtype1 = typeToValtype(scalar_type, pt, func.target.*),
+ .valtype1 = typeToValtype(scalar_type, zcu, cg.target),
});
- try func.addMemArg(Mir.Inst.Tag.fromOpcode(opcode), .{
+ try cg.addMemArg(Mir.Inst.Tag.fromOpcode(opcode), .{
.offset = operand.offset(),
.alignment = @intCast(scalar_type.abiAlignment(zcu).toByteUnits().?),
});
},
- else => try func.emitWValue(operand),
+ else => try cg.emitWValue(operand),
}
} else {
if (!ret_ty.hasRuntimeBitsIgnoreComptime(zcu) and ret_ty.isError(zcu)) {
- try func.addImm32(0);
+ try cg.addImm32(0);
} else {
- try func.emitWValue(operand);
+ try cg.emitWValue(operand);
}
}
- try func.restoreStackPointer();
- try func.addTag(.@"return");
+ try cg.restoreStackPointer();
+ try cg.addTag(.@"return");
- return func.finishAir(inst, .none, &.{un_op});
+ return cg.finishAir(inst, .none, &.{un_op});
}
-fn airRetPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const pt = func.pt;
- const zcu = pt.zcu;
- const child_type = func.typeOfIndex(inst).childType(zcu);
+fn airRetPtr(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const zcu = cg.pt.zcu;
+ const child_type = cg.typeOfIndex(inst).childType(zcu);
const result = result: {
if (!child_type.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) {
- break :result try func.allocStack(Type.usize); // create pointer to void
+ break :result try cg.allocStack(Type.usize); // create pointer to void
}
- const fn_info = zcu.typeToFunc(zcu.navValue(func.owner_nav).typeOf(zcu)).?;
- if (firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), pt, func.target.*)) {
- break :result func.return_value;
+ const fn_info = zcu.typeToFunc(zcu.navValue(cg.owner_nav).typeOf(zcu)).?;
+ if (firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), zcu, cg.target)) {
+ break :result cg.return_value;
}
- break :result try func.allocStackPtr(inst);
+ break :result try cg.allocStackPtr(inst);
};
- return func.finishAir(inst, result, &.{});
+ return cg.finishAir(inst, result, &.{});
}
-fn airRetLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const pt = func.pt;
- const zcu = pt.zcu;
- const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
- const operand = try func.resolveInst(un_op);
- const ret_ty = func.typeOf(un_op).childType(zcu);
+fn airRetLoad(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const zcu = cg.pt.zcu;
+ const un_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
+ const operand = try cg.resolveInst(un_op);
+ const ret_ty = cg.typeOf(un_op).childType(zcu);
- const fn_info = zcu.typeToFunc(zcu.navValue(func.owner_nav).typeOf(zcu)).?;
+ const fn_info = zcu.typeToFunc(zcu.navValue(cg.owner_nav).typeOf(zcu)).?;
if (!ret_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
if (ret_ty.isError(zcu)) {
- try func.addImm32(0);
+ try cg.addImm32(0);
}
- } else if (!firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), pt, func.target.*)) {
+ } else if (!firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), zcu, cg.target)) {
// leave on the stack
- _ = try func.load(operand, ret_ty, 0);
+ _ = try cg.load(operand, ret_ty, 0);
}
- try func.restoreStackPointer();
- try func.addTag(.@"return");
- return func.finishAir(inst, .none, &.{un_op});
+ try cg.restoreStackPointer();
+ try cg.addTag(.@"return");
+ return cg.finishAir(inst, .none, &.{un_op});
}
-fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModifier) InnerError!void {
- if (modifier == .always_tail) return func.fail("TODO implement tail calls for wasm", .{});
- const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
- const extra = func.air.extraData(Air.Call, pl_op.payload);
- const args = @as([]const Air.Inst.Ref, @ptrCast(func.air.extra[extra.end..][0..extra.data.args_len]));
- const ty = func.typeOf(pl_op.operand);
+fn airCall(cg: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModifier) InnerError!void {
+ const wasm = cg.wasm;
+ if (modifier == .always_tail) return cg.fail("TODO implement tail calls for wasm", .{});
+ const pl_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
+ const extra = cg.air.extraData(Air.Call, pl_op.payload);
+ const args: []const Air.Inst.Ref = @ptrCast(cg.air.extra[extra.end..][0..extra.data.args_len]);
+ const ty = cg.typeOf(pl_op.operand);
- const pt = func.pt;
+ const pt = cg.pt;
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const fn_ty = switch (ty.zigTypeTag(zcu)) {
@@ -2202,142 +2191,109 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
};
const ret_ty = fn_ty.fnReturnType(zcu);
const fn_info = zcu.typeToFunc(fn_ty).?;
- const first_param_sret = firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), pt, func.target.*);
+ const first_param_sret = firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), zcu, cg.target);
const callee: ?InternPool.Nav.Index = blk: {
- const func_val = (try func.air.value(pl_op.operand, pt)) orelse break :blk null;
+ const func_val = (try cg.air.value(pl_op.operand, pt)) orelse break :blk null;
switch (ip.indexToKey(func_val.toIntern())) {
- .func => |function| {
- _ = try func.bin_file.getOrCreateAtomForNav(pt, function.owner_nav);
- break :blk function.owner_nav;
- },
- .@"extern" => |@"extern"| {
- const ext_nav = ip.getNav(@"extern".owner_nav);
- const ext_info = zcu.typeToFunc(Type.fromInterned(@"extern".ty)).?;
- var func_type = try genFunctype(
- func.gpa,
- ext_info.cc,
- ext_info.param_types.get(ip),
- Type.fromInterned(ext_info.return_type),
- pt,
- func.target.*,
- );
- defer func_type.deinit(func.gpa);
- const atom_index = try func.bin_file.getOrCreateAtomForNav(pt, @"extern".owner_nav);
- const atom = func.bin_file.getAtomPtr(atom_index);
- const type_index = try func.bin_file.storeNavType(@"extern".owner_nav, func_type);
- try func.bin_file.addOrUpdateImport(
- ext_nav.name.toSlice(ip),
- atom.sym_index,
- @"extern".lib_name.toSlice(ip),
- type_index,
- );
- break :blk @"extern".owner_nav;
- },
+ inline .func, .@"extern" => |x| break :blk x.owner_nav,
.ptr => |ptr| if (ptr.byte_offset == 0) switch (ptr.base_addr) {
- .nav => |nav| {
- _ = try func.bin_file.getOrCreateAtomForNav(pt, nav);
- break :blk nav;
- },
+ .nav => |nav| break :blk nav,
else => {},
},
else => {},
}
- return func.fail("Expected a function, but instead found '{s}'", .{@tagName(ip.indexToKey(func_val.toIntern()))});
+ return cg.fail("unable to lower callee to a function index", .{});
};
const sret: WValue = if (first_param_sret) blk: {
- const sret_local = try func.allocStack(ret_ty);
- try func.lowerToStack(sret_local);
+ const sret_local = try cg.allocStack(ret_ty);
+ try cg.lowerToStack(sret_local);
break :blk sret_local;
} else .none;
for (args) |arg| {
- const arg_val = try func.resolveInst(arg);
+ const arg_val = try cg.resolveInst(arg);
- const arg_ty = func.typeOf(arg);
+ const arg_ty = cg.typeOf(arg);
if (!arg_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
- try func.lowerArg(zcu.typeToFunc(fn_ty).?.cc, arg_ty, arg_val);
+ try cg.lowerArg(zcu.typeToFunc(fn_ty).?.cc, arg_ty, arg_val);
}
- if (callee) |direct| {
- const atom_index = func.bin_file.zig_object.?.navs.get(direct).?.atom;
- try func.addLabel(.call, @intFromEnum(func.bin_file.getAtom(atom_index).sym_index));
+ if (callee) |nav_index| {
+ try cg.addInst(.{ .tag = .call_nav, .data = .{ .nav_index = nav_index } });
} else {
// in this case we call a function pointer
// so load its value onto the stack
- std.debug.assert(ty.zigTypeTag(zcu) == .pointer);
- const operand = try func.resolveInst(pl_op.operand);
- try func.emitWValue(operand);
-
- var fn_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types.get(ip), Type.fromInterned(fn_info.return_type), pt, func.target.*);
- defer fn_type.deinit(func.gpa);
+ assert(ty.zigTypeTag(zcu) == .pointer);
+ const operand = try cg.resolveInst(pl_op.operand);
+ try cg.emitWValue(operand);
- const fn_type_index = try func.bin_file.zig_object.?.putOrGetFuncType(func.gpa, fn_type);
- try func.addLabel(.call_indirect, fn_type_index);
+ const fn_type_index = try wasm.internFunctionType(fn_info.cc, fn_info.param_types.get(ip), .fromInterned(fn_info.return_type), cg.target);
+ try cg.addFuncTy(.call_indirect, fn_type_index);
}
const result_value = result_value: {
if (!ret_ty.hasRuntimeBitsIgnoreComptime(zcu) and !ret_ty.isError(zcu)) {
break :result_value .none;
} else if (ret_ty.isNoReturn(zcu)) {
- try func.addTag(.@"unreachable");
+ try cg.addTag(.@"unreachable");
break :result_value .none;
} else if (first_param_sret) {
break :result_value sret;
// TODO: Make this less fragile and optimize
} else if (zcu.typeToFunc(fn_ty).?.cc == .wasm_watc and ret_ty.zigTypeTag(zcu) == .@"struct" or ret_ty.zigTypeTag(zcu) == .@"union") {
- const result_local = try func.allocLocal(ret_ty);
- try func.addLabel(.local_set, result_local.local.value);
+ const result_local = try cg.allocLocal(ret_ty);
+ try cg.addLocal(.local_set, result_local.local.value);
const scalar_type = abi.scalarType(ret_ty, zcu);
- const result = try func.allocStack(scalar_type);
- try func.store(result, result_local, scalar_type, 0);
+ const result = try cg.allocStack(scalar_type);
+ try cg.store(result, result_local, scalar_type, 0);
break :result_value result;
} else {
- const result_local = try func.allocLocal(ret_ty);
- try func.addLabel(.local_set, result_local.local.value);
+ const result_local = try cg.allocLocal(ret_ty);
+ try cg.addLocal(.local_set, result_local.local.value);
break :result_value result_local;
}
};
- var bt = try func.iterateBigTomb(inst, 1 + args.len);
+ var bt = try cg.iterateBigTomb(inst, 1 + args.len);
bt.feed(pl_op.operand);
for (args) |arg| bt.feed(arg);
return bt.finishAir(result_value);
}
-fn airAlloc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const value = try func.allocStackPtr(inst);
- return func.finishAir(inst, value, &.{});
+fn airAlloc(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const value = try cg.allocStackPtr(inst);
+ return cg.finishAir(inst, value, &.{});
}
-fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void {
- const pt = func.pt;
+fn airStore(cg: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void {
+ const pt = cg.pt;
const zcu = pt.zcu;
if (safety) {
// TODO if the value is undef, write 0xaa bytes to dest
} else {
// TODO if the value is undef, don't lower this instruction
}
- const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+ const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
- const lhs = try func.resolveInst(bin_op.lhs);
- const rhs = try func.resolveInst(bin_op.rhs);
- const ptr_ty = func.typeOf(bin_op.lhs);
+ const lhs = try cg.resolveInst(bin_op.lhs);
+ const rhs = try cg.resolveInst(bin_op.rhs);
+ const ptr_ty = cg.typeOf(bin_op.lhs);
const ptr_info = ptr_ty.ptrInfo(zcu);
const ty = ptr_ty.childType(zcu);
if (ptr_info.packed_offset.host_size == 0) {
- try func.store(lhs, rhs, ty, 0);
+ try cg.store(lhs, rhs, ty, 0);
} else {
// at this point we have a non-natural alignment, we must
// load the value, and then shift+or the rhs into the result location.
const int_elem_ty = try pt.intType(.unsigned, ptr_info.packed_offset.host_size * 8);
- if (isByRef(int_elem_ty, pt, func.target.*)) {
- return func.fail("TODO: airStore for pointers to bitfields with backing type larger than 64bits", .{});
+ if (isByRef(int_elem_ty, zcu, cg.target)) {
+ return cg.fail("TODO: airStore for pointers to bitfields with backing type larger than 64bits", .{});
}
var mask = @as(u64, @intCast((@as(u65, 1) << @as(u7, @intCast(ty.bitSize(zcu)))) - 1));
@@ -2356,115 +2312,115 @@ fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void
else
.{ .imm64 = ~@as(u64, 0) >> @intCast(64 - ty.bitSize(zcu)) };
- try func.emitWValue(lhs);
- const loaded = try func.load(lhs, int_elem_ty, 0);
- const anded = try func.binOp(loaded, mask_val, int_elem_ty, .@"and");
- const extended_value = try func.intcast(rhs, ty, int_elem_ty);
- const masked_value = try func.binOp(extended_value, wrap_mask_val, int_elem_ty, .@"and");
+ try cg.emitWValue(lhs);
+ const loaded = try cg.load(lhs, int_elem_ty, 0);
+ const anded = try cg.binOp(loaded, mask_val, int_elem_ty, .@"and");
+ const extended_value = try cg.intcast(rhs, ty, int_elem_ty);
+ const masked_value = try cg.binOp(extended_value, wrap_mask_val, int_elem_ty, .@"and");
const shifted_value = if (ptr_info.packed_offset.bit_offset > 0) shifted: {
- break :shifted try func.binOp(masked_value, shift_val, int_elem_ty, .shl);
+ break :shifted try cg.binOp(masked_value, shift_val, int_elem_ty, .shl);
} else masked_value;
- const result = try func.binOp(anded, shifted_value, int_elem_ty, .@"or");
+ const result = try cg.binOp(anded, shifted_value, int_elem_ty, .@"or");
// lhs is still on the stack
- try func.store(.stack, result, int_elem_ty, lhs.offset());
+ try cg.store(.stack, result, int_elem_ty, lhs.offset());
}
- return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
+ return cg.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
}
-fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerError!void {
+fn store(cg: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerError!void {
assert(!(lhs != .stack and rhs == .stack));
- const pt = func.pt;
+ const pt = cg.pt;
const zcu = pt.zcu;
const abi_size = ty.abiSize(zcu);
switch (ty.zigTypeTag(zcu)) {
.error_union => {
const pl_ty = ty.errorUnionPayload(zcu);
if (!pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
- return func.store(lhs, rhs, Type.anyerror, 0);
+ return cg.store(lhs, rhs, Type.anyerror, 0);
}
const len = @as(u32, @intCast(abi_size));
- return func.memcpy(lhs, rhs, .{ .imm32 = len });
+ return cg.memcpy(lhs, rhs, .{ .imm32 = len });
},
.optional => {
if (ty.isPtrLikeOptional(zcu)) {
- return func.store(lhs, rhs, Type.usize, 0);
+ return cg.store(lhs, rhs, Type.usize, 0);
}
const pl_ty = ty.optionalChild(zcu);
if (!pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
- return func.store(lhs, rhs, Type.u8, 0);
+ return cg.store(lhs, rhs, Type.u8, 0);
}
if (pl_ty.zigTypeTag(zcu) == .error_set) {
- return func.store(lhs, rhs, Type.anyerror, 0);
+ return cg.store(lhs, rhs, Type.anyerror, 0);
}
const len = @as(u32, @intCast(abi_size));
- return func.memcpy(lhs, rhs, .{ .imm32 = len });
+ return cg.memcpy(lhs, rhs, .{ .imm32 = len });
},
- .@"struct", .array, .@"union" => if (isByRef(ty, pt, func.target.*)) {
+ .@"struct", .array, .@"union" => if (isByRef(ty, zcu, cg.target)) {
const len = @as(u32, @intCast(abi_size));
- return func.memcpy(lhs, rhs, .{ .imm32 = len });
+ return cg.memcpy(lhs, rhs, .{ .imm32 = len });
},
- .vector => switch (determineSimdStoreStrategy(ty, zcu, func.target.*)) {
+ .vector => switch (determineSimdStoreStrategy(ty, zcu, cg.target)) {
.unrolled => {
const len: u32 = @intCast(abi_size);
- return func.memcpy(lhs, rhs, .{ .imm32 = len });
+ return cg.memcpy(lhs, rhs, .{ .imm32 = len });
},
.direct => {
- try func.emitWValue(lhs);
- try func.lowerToStack(rhs);
+ try cg.emitWValue(lhs);
+ try cg.lowerToStack(rhs);
// TODO: Add helper functions for simd opcodes
- const extra_index: u32 = @intCast(func.mir_extra.items.len);
+ const extra_index = cg.extraLen();
// stores as := opcode, offset, alignment (opcode::memarg)
- try func.mir_extra.appendSlice(func.gpa, &[_]u32{
- std.wasm.simdOpcode(.v128_store),
+ try cg.mir_extra.appendSlice(cg.gpa, &[_]u32{
+ @intFromEnum(std.wasm.SimdOpcode.v128_store),
offset + lhs.offset(),
@intCast(ty.abiAlignment(zcu).toByteUnits() orelse 0),
});
- return func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
+ return cg.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
},
},
.pointer => {
if (ty.isSlice(zcu)) {
// store pointer first
// lower it to the stack so we do not have to store rhs into a local first
- try func.emitWValue(lhs);
- const ptr_local = try func.load(rhs, Type.usize, 0);
- try func.store(.stack, ptr_local, Type.usize, 0 + lhs.offset());
+ try cg.emitWValue(lhs);
+ const ptr_local = try cg.load(rhs, Type.usize, 0);
+ try cg.store(.stack, ptr_local, Type.usize, 0 + lhs.offset());
// retrieve length from rhs, and store that alongside lhs as well
- try func.emitWValue(lhs);
- const len_local = try func.load(rhs, Type.usize, func.ptrSize());
- try func.store(.stack, len_local, Type.usize, func.ptrSize() + lhs.offset());
+ try cg.emitWValue(lhs);
+ const len_local = try cg.load(rhs, Type.usize, cg.ptrSize());
+ try cg.store(.stack, len_local, Type.usize, cg.ptrSize() + lhs.offset());
return;
}
},
.int, .@"enum", .float => if (abi_size > 8 and abi_size <= 16) {
- try func.emitWValue(lhs);
- const lsb = try func.load(rhs, Type.u64, 0);
- try func.store(.stack, lsb, Type.u64, 0 + lhs.offset());
+ try cg.emitWValue(lhs);
+ const lsb = try cg.load(rhs, Type.u64, 0);
+ try cg.store(.stack, lsb, Type.u64, 0 + lhs.offset());
- try func.emitWValue(lhs);
- const msb = try func.load(rhs, Type.u64, 8);
- try func.store(.stack, msb, Type.u64, 8 + lhs.offset());
+ try cg.emitWValue(lhs);
+ const msb = try cg.load(rhs, Type.u64, 8);
+ try cg.store(.stack, msb, Type.u64, 8 + lhs.offset());
return;
} else if (abi_size > 16) {
- try func.memcpy(lhs, rhs, .{ .imm32 = @as(u32, @intCast(ty.abiSize(zcu))) });
+ try cg.memcpy(lhs, rhs, .{ .imm32 = @as(u32, @intCast(ty.abiSize(zcu))) });
},
else => if (abi_size > 8) {
- return func.fail("TODO: `store` for type `{}` with abisize `{d}`", .{
+ return cg.fail("TODO: `store` for type `{}` with abisize `{d}`", .{
ty.fmt(pt),
abi_size,
});
},
}
- try func.emitWValue(lhs);
+ try cg.emitWValue(lhs);
// In this case we're actually interested in storing the stack position
// into lhs, so we calculate that and emit that instead
- try func.lowerToStack(rhs);
+ try cg.lowerToStack(rhs);
- const valtype = typeToValtype(ty, pt, func.target.*);
+ const valtype = typeToValtype(ty, zcu, cg.target);
const opcode = buildOpcode(.{
.valtype1 = valtype,
.width = @as(u8, @intCast(abi_size * 8)),
@@ -2472,7 +2428,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
});
// store rhs value at stack pointer's location in memory
- try func.addMemArg(
+ try cg.addMemArg(
Mir.Inst.Tag.fromOpcode(opcode),
.{
.offset = offset + lhs.offset(),
@@ -2481,26 +2437,26 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
);
}
-fn airLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const pt = func.pt;
+fn airLoad(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const pt = cg.pt;
const zcu = pt.zcu;
- const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
- const operand = try func.resolveInst(ty_op.operand);
+ const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+ const operand = try cg.resolveInst(ty_op.operand);
const ty = ty_op.ty.toType();
- const ptr_ty = func.typeOf(ty_op.operand);
+ const ptr_ty = cg.typeOf(ty_op.operand);
const ptr_info = ptr_ty.ptrInfo(zcu);
- if (!ty.hasRuntimeBitsIgnoreComptime(zcu)) return func.finishAir(inst, .none, &.{ty_op.operand});
+ if (!ty.hasRuntimeBitsIgnoreComptime(zcu)) return cg.finishAir(inst, .none, &.{ty_op.operand});
const result = result: {
- if (isByRef(ty, pt, func.target.*)) {
- const new_local = try func.allocStack(ty);
- try func.store(new_local, operand, ty, 0);
+ if (isByRef(ty, zcu, cg.target)) {
+ const new_local = try cg.allocStack(ty);
+ try cg.store(new_local, operand, ty, 0);
break :result new_local;
}
if (ptr_info.packed_offset.host_size == 0) {
- break :result try func.load(operand, ty, 0);
+ break :result try cg.load(operand, ty, 0);
}
// at this point we have a non-natural alignment, we must
@@ -2511,45 +2467,44 @@ fn airLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
else if (ptr_info.packed_offset.host_size <= 8)
.{ .imm64 = ptr_info.packed_offset.bit_offset }
else
- return func.fail("TODO: airLoad where ptr to bitfield exceeds 64 bits", .{});
+ return cg.fail("TODO: airLoad where ptr to bitfield exceeds 64 bits", .{});
- const stack_loaded = try func.load(operand, int_elem_ty, 0);
- const shifted = try func.binOp(stack_loaded, shift_val, int_elem_ty, .shr);
- break :result try func.trunc(shifted, ty, int_elem_ty);
+ const stack_loaded = try cg.load(operand, int_elem_ty, 0);
+ const shifted = try cg.binOp(stack_loaded, shift_val, int_elem_ty, .shr);
+ break :result try cg.trunc(shifted, ty, int_elem_ty);
};
- return func.finishAir(inst, result, &.{ty_op.operand});
+ return cg.finishAir(inst, result, &.{ty_op.operand});
}
/// Loads an operand from the linear memory section.
/// NOTE: Leaves the value on the stack.
-fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValue {
- const pt = func.pt;
- const zcu = pt.zcu;
+fn load(cg: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValue {
+ const zcu = cg.pt.zcu;
// load local's value from memory by its stack position
- try func.emitWValue(operand);
+ try cg.emitWValue(operand);
if (ty.zigTypeTag(zcu) == .vector) {
// TODO: Add helper functions for simd opcodes
- const extra_index = @as(u32, @intCast(func.mir_extra.items.len));
+ const extra_index = cg.extraLen();
// stores as := opcode, offset, alignment (opcode::memarg)
- try func.mir_extra.appendSlice(func.gpa, &[_]u32{
- std.wasm.simdOpcode(.v128_load),
+ try cg.mir_extra.appendSlice(cg.gpa, &[_]u32{
+ @intFromEnum(std.wasm.SimdOpcode.v128_load),
offset + operand.offset(),
@intCast(ty.abiAlignment(zcu).toByteUnits().?),
});
- try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
+ try cg.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
return .stack;
}
const abi_size: u8 = @intCast(ty.abiSize(zcu));
const opcode = buildOpcode(.{
- .valtype1 = typeToValtype(ty, pt, func.target.*),
+ .valtype1 = typeToValtype(ty, zcu, cg.target),
.width = abi_size * 8,
.op = .load,
.signedness = if (ty.isSignedInt(zcu)) .signed else .unsigned,
});
- try func.addMemArg(
+ try cg.addMemArg(
Mir.Inst.Tag.fromOpcode(opcode),
.{
.offset = offset + operand.offset(),
@@ -2560,18 +2515,18 @@ fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValu
return .stack;
}
-fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const pt = func.pt;
+fn airArg(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const pt = cg.pt;
const zcu = pt.zcu;
- const arg_index = func.arg_index;
- const arg = func.args[arg_index];
- const cc = zcu.typeToFunc(zcu.navValue(func.owner_nav).typeOf(zcu)).?.cc;
- const arg_ty = func.typeOfIndex(inst);
+ const arg_index = cg.arg_index;
+ const arg = cg.args[arg_index];
+ const cc = zcu.typeToFunc(zcu.navValue(cg.owner_nav).typeOf(zcu)).?.cc;
+ const arg_ty = cg.typeOfIndex(inst);
if (cc == .wasm_watc) {
const arg_classes = abi.classifyType(arg_ty, zcu);
for (arg_classes) |class| {
if (class != .none) {
- func.arg_index += 1;
+ cg.arg_index += 1;
}
}
@@ -2579,44 +2534,30 @@ fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// we combine them into a single stack value
if (arg_classes[0] == .direct and arg_classes[1] == .direct) {
if (arg_ty.zigTypeTag(zcu) != .int and arg_ty.zigTypeTag(zcu) != .float) {
- return func.fail(
+ return cg.fail(
"TODO: Implement C-ABI argument for type '{}'",
.{arg_ty.fmt(pt)},
);
}
- const result = try func.allocStack(arg_ty);
- try func.store(result, arg, Type.u64, 0);
- try func.store(result, func.args[arg_index + 1], Type.u64, 8);
- return func.finishAir(inst, result, &.{});
+ const result = try cg.allocStack(arg_ty);
+ try cg.store(result, arg, Type.u64, 0);
+ try cg.store(result, cg.args[arg_index + 1], Type.u64, 8);
+ return cg.finishAir(inst, result, &.{});
}
} else {
- func.arg_index += 1;
+ cg.arg_index += 1;
}
- switch (func.debug_output) {
- .dwarf => |dwarf| {
- const name = func.air.instructions.items(.data)[@intFromEnum(inst)].arg.name;
- if (name != .none) try dwarf.genLocalDebugInfo(
- .local_arg,
- name.toSlice(func.air),
- arg_ty,
- .{ .wasm_ext = .{ .local = arg.local.value } },
- );
- },
- else => {},
- }
-
- return func.finishAir(inst, arg, &.{});
+ return cg.finishAir(inst, arg, &.{});
}
-fn airBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
- const pt = func.pt;
- const zcu = pt.zcu;
- const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
- const lhs = try func.resolveInst(bin_op.lhs);
- const rhs = try func.resolveInst(bin_op.rhs);
- const lhs_ty = func.typeOf(bin_op.lhs);
- const rhs_ty = func.typeOf(bin_op.rhs);
+fn airBinOp(cg: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
+ const zcu = cg.pt.zcu;
+ const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+ const lhs = try cg.resolveInst(bin_op.lhs);
+ const rhs = try cg.resolveInst(bin_op.rhs);
+ const lhs_ty = cg.typeOf(bin_op.lhs);
+ const rhs_ty = cg.typeOf(bin_op.rhs);
// For certain operations, such as shifting, the types are different.
// When converting this to a WebAssembly type, they *must* match to perform
@@ -2626,122 +2567,121 @@ fn airBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
const result = switch (op) {
.shr, .shl => result: {
const lhs_wasm_bits = toWasmBits(@intCast(lhs_ty.bitSize(zcu))) orelse {
- return func.fail("TODO: implement '{s}' for types larger than 128 bits", .{@tagName(op)});
+ return cg.fail("TODO: implement '{s}' for types larger than 128 bits", .{@tagName(op)});
};
const rhs_wasm_bits = toWasmBits(@intCast(rhs_ty.bitSize(zcu))).?;
const new_rhs = if (lhs_wasm_bits != rhs_wasm_bits and lhs_wasm_bits != 128)
- try (try func.intcast(rhs, rhs_ty, lhs_ty)).toLocal(func, lhs_ty)
+ try (try cg.intcast(rhs, rhs_ty, lhs_ty)).toLocal(cg, lhs_ty)
else
rhs;
- break :result try func.binOp(lhs, new_rhs, lhs_ty, op);
+ break :result try cg.binOp(lhs, new_rhs, lhs_ty, op);
},
- else => try func.binOp(lhs, rhs, lhs_ty, op),
+ else => try cg.binOp(lhs, rhs, lhs_ty, op),
};
- return func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
+ return cg.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
}
/// Performs a binary operation on the given `WValue`'s
/// NOTE: This leaves the value on top of the stack.
-fn binOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue {
- const pt = func.pt;
+fn binOp(cg: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue {
+ const pt = cg.pt;
const zcu = pt.zcu;
assert(!(lhs != .stack and rhs == .stack));
if (ty.isAnyFloat()) {
const float_op = FloatOp.fromOp(op);
- return func.floatOp(float_op, ty, &.{ lhs, rhs });
+ return cg.floatOp(float_op, ty, &.{ lhs, rhs });
}
- if (isByRef(ty, pt, func.target.*)) {
+ if (isByRef(ty, zcu, cg.target)) {
if (ty.zigTypeTag(zcu) == .int) {
- return func.binOpBigInt(lhs, rhs, ty, op);
+ return cg.binOpBigInt(lhs, rhs, ty, op);
} else {
- return func.fail(
+ return cg.fail(
"TODO: Implement binary operation for type: {}",
.{ty.fmt(pt)},
);
}
}
- const opcode: wasm.Opcode = buildOpcode(.{
+ const opcode: std.wasm.Opcode = buildOpcode(.{
.op = op,
- .valtype1 = typeToValtype(ty, pt, func.target.*),
+ .valtype1 = typeToValtype(ty, zcu, cg.target),
.signedness = if (ty.isSignedInt(zcu)) .signed else .unsigned,
});
- try func.emitWValue(lhs);
- try func.emitWValue(rhs);
+ try cg.emitWValue(lhs);
+ try cg.emitWValue(rhs);
- try func.addTag(Mir.Inst.Tag.fromOpcode(opcode));
+ try cg.addTag(Mir.Inst.Tag.fromOpcode(opcode));
return .stack;
}
-fn binOpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue {
- const pt = func.pt;
- const zcu = pt.zcu;
+fn binOpBigInt(cg: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue {
+ const zcu = cg.pt.zcu;
const int_info = ty.intInfo(zcu);
if (int_info.bits > 128) {
- return func.fail("TODO: Implement binary operation for big integers larger than 128 bits", .{});
+ return cg.fail("TODO: Implement binary operation for big integers larger than 128 bits", .{});
}
switch (op) {
- .mul => return func.callIntrinsic("__multi3", &.{ ty.toIntern(), ty.toIntern() }, ty, &.{ lhs, rhs }),
+ .mul => return cg.callIntrinsic(.__multi3, &.{ ty.toIntern(), ty.toIntern() }, ty, &.{ lhs, rhs }),
.div => switch (int_info.signedness) {
- .signed => return func.callIntrinsic("__divti3", &.{ ty.toIntern(), ty.toIntern() }, ty, &.{ lhs, rhs }),
- .unsigned => return func.callIntrinsic("__udivti3", &.{ ty.toIntern(), ty.toIntern() }, ty, &.{ lhs, rhs }),
+ .signed => return cg.callIntrinsic(.__divti3, &.{ ty.toIntern(), ty.toIntern() }, ty, &.{ lhs, rhs }),
+ .unsigned => return cg.callIntrinsic(.__udivti3, &.{ ty.toIntern(), ty.toIntern() }, ty, &.{ lhs, rhs }),
},
.rem => switch (int_info.signedness) {
- .signed => return func.callIntrinsic("__modti3", &.{ ty.toIntern(), ty.toIntern() }, ty, &.{ lhs, rhs }),
- .unsigned => return func.callIntrinsic("__umodti3", &.{ ty.toIntern(), ty.toIntern() }, ty, &.{ lhs, rhs }),
+ .signed => return cg.callIntrinsic(.__modti3, &.{ ty.toIntern(), ty.toIntern() }, ty, &.{ lhs, rhs }),
+ .unsigned => return cg.callIntrinsic(.__umodti3, &.{ ty.toIntern(), ty.toIntern() }, ty, &.{ lhs, rhs }),
},
.shr => switch (int_info.signedness) {
- .signed => return func.callIntrinsic("__ashrti3", &.{ ty.toIntern(), .i32_type }, ty, &.{ lhs, rhs }),
- .unsigned => return func.callIntrinsic("__lshrti3", &.{ ty.toIntern(), .i32_type }, ty, &.{ lhs, rhs }),
+ .signed => return cg.callIntrinsic(.__ashrti3, &.{ ty.toIntern(), .i32_type }, ty, &.{ lhs, rhs }),
+ .unsigned => return cg.callIntrinsic(.__lshrti3, &.{ ty.toIntern(), .i32_type }, ty, &.{ lhs, rhs }),
},
- .shl => return func.callIntrinsic("__ashlti3", &.{ ty.toIntern(), .i32_type }, ty, &.{ lhs, rhs }),
+ .shl => return cg.callIntrinsic(.__ashlti3, &.{ ty.toIntern(), .i32_type }, ty, &.{ lhs, rhs }),
.@"and", .@"or", .xor => {
- const result = try func.allocStack(ty);
- try func.emitWValue(result);
- const lhs_lsb = try func.load(lhs, Type.u64, 0);
- const rhs_lsb = try func.load(rhs, Type.u64, 0);
- const op_lsb = try func.binOp(lhs_lsb, rhs_lsb, Type.u64, op);
- try func.store(.stack, op_lsb, Type.u64, result.offset());
-
- try func.emitWValue(result);
- const lhs_msb = try func.load(lhs, Type.u64, 8);
- const rhs_msb = try func.load(rhs, Type.u64, 8);
- const op_msb = try func.binOp(lhs_msb, rhs_msb, Type.u64, op);
- try func.store(.stack, op_msb, Type.u64, result.offset() + 8);
+ const result = try cg.allocStack(ty);
+ try cg.emitWValue(result);
+ const lhs_lsb = try cg.load(lhs, Type.u64, 0);
+ const rhs_lsb = try cg.load(rhs, Type.u64, 0);
+ const op_lsb = try cg.binOp(lhs_lsb, rhs_lsb, Type.u64, op);
+ try cg.store(.stack, op_lsb, Type.u64, result.offset());
+
+ try cg.emitWValue(result);
+ const lhs_msb = try cg.load(lhs, Type.u64, 8);
+ const rhs_msb = try cg.load(rhs, Type.u64, 8);
+ const op_msb = try cg.binOp(lhs_msb, rhs_msb, Type.u64, op);
+ try cg.store(.stack, op_msb, Type.u64, result.offset() + 8);
return result;
},
.add, .sub => {
- const result = try func.allocStack(ty);
- var lhs_lsb = try (try func.load(lhs, Type.u64, 0)).toLocal(func, Type.u64);
- defer lhs_lsb.free(func);
- var rhs_lsb = try (try func.load(rhs, Type.u64, 0)).toLocal(func, Type.u64);
- defer rhs_lsb.free(func);
- var op_lsb = try (try func.binOp(lhs_lsb, rhs_lsb, Type.u64, op)).toLocal(func, Type.u64);
- defer op_lsb.free(func);
-
- const lhs_msb = try func.load(lhs, Type.u64, 8);
- const rhs_msb = try func.load(rhs, Type.u64, 8);
- const op_msb = try func.binOp(lhs_msb, rhs_msb, Type.u64, op);
+ const result = try cg.allocStack(ty);
+ var lhs_lsb = try (try cg.load(lhs, Type.u64, 0)).toLocal(cg, Type.u64);
+ defer lhs_lsb.free(cg);
+ var rhs_lsb = try (try cg.load(rhs, Type.u64, 0)).toLocal(cg, Type.u64);
+ defer rhs_lsb.free(cg);
+ var op_lsb = try (try cg.binOp(lhs_lsb, rhs_lsb, Type.u64, op)).toLocal(cg, Type.u64);
+ defer op_lsb.free(cg);
+
+ const lhs_msb = try cg.load(lhs, Type.u64, 8);
+ const rhs_msb = try cg.load(rhs, Type.u64, 8);
+ const op_msb = try cg.binOp(lhs_msb, rhs_msb, Type.u64, op);
const lt = if (op == .add) blk: {
- break :blk try func.cmp(op_lsb, rhs_lsb, Type.u64, .lt);
+ break :blk try cg.cmp(op_lsb, rhs_lsb, Type.u64, .lt);
} else if (op == .sub) blk: {
- break :blk try func.cmp(lhs_lsb, rhs_lsb, Type.u64, .lt);
+ break :blk try cg.cmp(lhs_lsb, rhs_lsb, Type.u64, .lt);
} else unreachable;
- const tmp = try func.intcast(lt, Type.u32, Type.u64);
- var tmp_op = try (try func.binOp(op_msb, tmp, Type.u64, op)).toLocal(func, Type.u64);
- defer tmp_op.free(func);
+ const tmp = try cg.intcast(lt, Type.u32, Type.u64);
+ var tmp_op = try (try cg.binOp(op_msb, tmp, Type.u64, op)).toLocal(cg, Type.u64);
+ defer tmp_op.free(cg);
- try func.store(result, op_lsb, Type.u64, 0);
- try func.store(result, tmp_op, Type.u64, 8);
+ try cg.store(result, op_lsb, Type.u64, 0);
+ try cg.store(result, tmp_op, Type.u64, 8);
return result;
},
- else => return func.fail("TODO: Implement binary operation for big integers: '{s}'", .{@tagName(op)}),
+ else => return cg.fail("TODO: Implement binary operation for big integers: '{s}'", .{@tagName(op)}),
}
}
@@ -2819,199 +2759,214 @@ const FloatOp = enum {
=> null,
};
}
+
+ fn intrinsic(op: FloatOp, bits: u16) Mir.Intrinsic {
+ return switch (op) {
+ inline .add, .sub, .div, .mul => |ct_op| switch (bits) {
+ inline 16, 80, 128 => |ct_bits| @field(
+ Mir.Intrinsic,
+ "__" ++ @tagName(ct_op) ++ compilerRtFloatAbbrev(ct_bits) ++ "f3",
+ ),
+ else => unreachable,
+ },
+
+ inline .ceil,
+ .fabs,
+ .floor,
+ .fmax,
+ .fmin,
+ .round,
+ .sqrt,
+ .trunc,
+ => |ct_op| switch (bits) {
+ inline 16, 80, 128 => |ct_bits| @field(
+ Mir.Intrinsic,
+ libcFloatPrefix(ct_bits) ++ @tagName(ct_op) ++ libcFloatSuffix(ct_bits),
+ ),
+ else => unreachable,
+ },
+
+ inline .cos,
+ .exp,
+ .exp2,
+ .fma,
+ .fmod,
+ .log,
+ .log10,
+ .log2,
+ .sin,
+ .tan,
+ => |ct_op| switch (bits) {
+ inline 16, 32, 64, 80, 128 => |ct_bits| @field(
+ Mir.Intrinsic,
+ libcFloatPrefix(ct_bits) ++ @tagName(ct_op) ++ libcFloatSuffix(ct_bits),
+ ),
+ else => unreachable,
+ },
+
+ .neg => unreachable,
+ };
+ }
};
-fn airAbs(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const pt = func.pt;
+fn airAbs(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const pt = cg.pt;
const zcu = pt.zcu;
- const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
- const operand = try func.resolveInst(ty_op.operand);
- const ty = func.typeOf(ty_op.operand);
+ const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+ const operand = try cg.resolveInst(ty_op.operand);
+ const ty = cg.typeOf(ty_op.operand);
const scalar_ty = ty.scalarType(zcu);
switch (scalar_ty.zigTypeTag(zcu)) {
.int => if (ty.zigTypeTag(zcu) == .vector) {
- return func.fail("TODO implement airAbs for {}", .{ty.fmt(pt)});
+ return cg.fail("TODO implement airAbs for {}", .{ty.fmt(pt)});
} else {
const int_bits = ty.intInfo(zcu).bits;
const wasm_bits = toWasmBits(int_bits) orelse {
- return func.fail("TODO: airAbs for signed integers larger than '{d}' bits", .{int_bits});
+ return cg.fail("TODO: airAbs for signed integers larger than '{d}' bits", .{int_bits});
};
switch (wasm_bits) {
32 => {
- try func.emitWValue(operand);
+ try cg.emitWValue(operand);
- try func.addImm32(31);
- try func.addTag(.i32_shr_s);
+ try cg.addImm32(31);
+ try cg.addTag(.i32_shr_s);
- var tmp = try func.allocLocal(ty);
- defer tmp.free(func);
- try func.addLabel(.local_tee, tmp.local.value);
+ var tmp = try cg.allocLocal(ty);
+ defer tmp.free(cg);
+ try cg.addLocal(.local_tee, tmp.local.value);
- try func.emitWValue(operand);
- try func.addTag(.i32_xor);
- try func.emitWValue(tmp);
- try func.addTag(.i32_sub);
- return func.finishAir(inst, .stack, &.{ty_op.operand});
+ try cg.emitWValue(operand);
+ try cg.addTag(.i32_xor);
+ try cg.emitWValue(tmp);
+ try cg.addTag(.i32_sub);
+ return cg.finishAir(inst, .stack, &.{ty_op.operand});
},
64 => {
- try func.emitWValue(operand);
+ try cg.emitWValue(operand);
- try func.addImm64(63);
- try func.addTag(.i64_shr_s);
+ try cg.addImm64(63);
+ try cg.addTag(.i64_shr_s);
- var tmp = try func.allocLocal(ty);
- defer tmp.free(func);
- try func.addLabel(.local_tee, tmp.local.value);
+ var tmp = try cg.allocLocal(ty);
+ defer tmp.free(cg);
+ try cg.addLocal(.local_tee, tmp.local.value);
- try func.emitWValue(operand);
- try func.addTag(.i64_xor);
- try func.emitWValue(tmp);
- try func.addTag(.i64_sub);
- return func.finishAir(inst, .stack, &.{ty_op.operand});
+ try cg.emitWValue(operand);
+ try cg.addTag(.i64_xor);
+ try cg.emitWValue(tmp);
+ try cg.addTag(.i64_sub);
+ return cg.finishAir(inst, .stack, &.{ty_op.operand});
},
128 => {
- const mask = try func.allocStack(Type.u128);
- try func.emitWValue(mask);
- try func.emitWValue(mask);
+ const mask = try cg.allocStack(Type.u128);
+ try cg.emitWValue(mask);
+ try cg.emitWValue(mask);
- _ = try func.load(operand, Type.u64, 8);
- try func.addImm64(63);
- try func.addTag(.i64_shr_s);
+ _ = try cg.load(operand, Type.u64, 8);
+ try cg.addImm64(63);
+ try cg.addTag(.i64_shr_s);
- var tmp = try func.allocLocal(Type.u64);
- defer tmp.free(func);
- try func.addLabel(.local_tee, tmp.local.value);
- try func.store(.stack, .stack, Type.u64, mask.offset() + 0);
- try func.emitWValue(tmp);
- try func.store(.stack, .stack, Type.u64, mask.offset() + 8);
+ var tmp = try cg.allocLocal(Type.u64);
+ defer tmp.free(cg);
+ try cg.addLocal(.local_tee, tmp.local.value);
+ try cg.store(.stack, .stack, Type.u64, mask.offset() + 0);
+ try cg.emitWValue(tmp);
+ try cg.store(.stack, .stack, Type.u64, mask.offset() + 8);
- const a = try func.binOpBigInt(operand, mask, Type.u128, .xor);
- const b = try func.binOpBigInt(a, mask, Type.u128, .sub);
+ const a = try cg.binOpBigInt(operand, mask, Type.u128, .xor);
+ const b = try cg.binOpBigInt(a, mask, Type.u128, .sub);
- return func.finishAir(inst, b, &.{ty_op.operand});
+ return cg.finishAir(inst, b, &.{ty_op.operand});
},
else => unreachable,
}
},
.float => {
- const result = try func.floatOp(.fabs, ty, &.{operand});
- return func.finishAir(inst, result, &.{ty_op.operand});
+ const result = try cg.floatOp(.fabs, ty, &.{operand});
+ return cg.finishAir(inst, result, &.{ty_op.operand});
},
else => unreachable,
}
}
-fn airUnaryFloatOp(func: *CodeGen, inst: Air.Inst.Index, op: FloatOp) InnerError!void {
- const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
- const operand = try func.resolveInst(un_op);
- const ty = func.typeOf(un_op);
+fn airUnaryFloatOp(cg: *CodeGen, inst: Air.Inst.Index, op: FloatOp) InnerError!void {
+ const un_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
+ const operand = try cg.resolveInst(un_op);
+ const ty = cg.typeOf(un_op);
- const result = try func.floatOp(op, ty, &.{operand});
- return func.finishAir(inst, result, &.{un_op});
+ const result = try cg.floatOp(op, ty, &.{operand});
+ return cg.finishAir(inst, result, &.{un_op});
}
-fn floatOp(func: *CodeGen, float_op: FloatOp, ty: Type, args: []const WValue) InnerError!WValue {
- const pt = func.pt;
- const zcu = pt.zcu;
+fn floatOp(cg: *CodeGen, float_op: FloatOp, ty: Type, args: []const WValue) InnerError!WValue {
+ const zcu = cg.pt.zcu;
if (ty.zigTypeTag(zcu) == .vector) {
- return func.fail("TODO: Implement floatOps for vectors", .{});
+ return cg.fail("TODO: Implement floatOps for vectors", .{});
}
- const float_bits = ty.floatBits(func.target.*);
+ const float_bits = ty.floatBits(cg.target.*);
if (float_op == .neg) {
- return func.floatNeg(ty, args[0]);
+ return cg.floatNeg(ty, args[0]);
}
if (float_bits == 32 or float_bits == 64) {
if (float_op.toOp()) |op| {
for (args) |operand| {
- try func.emitWValue(operand);
+ try cg.emitWValue(operand);
}
- const opcode = buildOpcode(.{ .op = op, .valtype1 = typeToValtype(ty, pt, func.target.*) });
- try func.addTag(Mir.Inst.Tag.fromOpcode(opcode));
+ const opcode = buildOpcode(.{ .op = op, .valtype1 = typeToValtype(ty, zcu, cg.target) });
+ try cg.addTag(Mir.Inst.Tag.fromOpcode(opcode));
return .stack;
}
}
- var fn_name_buf: [64]u8 = undefined;
- const fn_name = switch (float_op) {
- .add,
- .sub,
- .div,
- .mul,
- => std.fmt.bufPrint(&fn_name_buf, "__{s}{s}f3", .{
- @tagName(float_op), target_util.compilerRtFloatAbbrev(float_bits),
- }) catch unreachable,
-
- .ceil,
- .cos,
- .exp,
- .exp2,
- .fabs,
- .floor,
- .fma,
- .fmax,
- .fmin,
- .fmod,
- .log,
- .log10,
- .log2,
- .round,
- .sin,
- .sqrt,
- .tan,
- .trunc,
- => std.fmt.bufPrint(&fn_name_buf, "{s}{s}{s}", .{
- target_util.libcFloatPrefix(float_bits), @tagName(float_op), target_util.libcFloatSuffix(float_bits),
- }) catch unreachable,
- .neg => unreachable, // handled above
- };
+ const intrinsic = float_op.intrinsic(float_bits);
// fma requires three operands
var param_types_buffer: [3]InternPool.Index = .{ ty.ip_index, ty.ip_index, ty.ip_index };
const param_types = param_types_buffer[0..args.len];
- return func.callIntrinsic(fn_name, param_types, ty, args);
+ return cg.callIntrinsic(intrinsic, param_types, ty, args);
}
/// NOTE: The result value remains on top of the stack.
-fn floatNeg(func: *CodeGen, ty: Type, arg: WValue) InnerError!WValue {
- const float_bits = ty.floatBits(func.target.*);
+fn floatNeg(cg: *CodeGen, ty: Type, arg: WValue) InnerError!WValue {
+ const float_bits = ty.floatBits(cg.target.*);
switch (float_bits) {
16 => {
- try func.emitWValue(arg);
- try func.addImm32(0x8000);
- try func.addTag(.i32_xor);
+ try cg.emitWValue(arg);
+ try cg.addImm32(0x8000);
+ try cg.addTag(.i32_xor);
return .stack;
},
32, 64 => {
- try func.emitWValue(arg);
- const val_type: wasm.Valtype = if (float_bits == 32) .f32 else .f64;
+ try cg.emitWValue(arg);
+ const val_type: std.wasm.Valtype = if (float_bits == 32) .f32 else .f64;
const opcode = buildOpcode(.{ .op = .neg, .valtype1 = val_type });
- try func.addTag(Mir.Inst.Tag.fromOpcode(opcode));
+ try cg.addTag(Mir.Inst.Tag.fromOpcode(opcode));
return .stack;
},
80, 128 => {
- const result = try func.allocStack(ty);
- try func.emitWValue(result);
- try func.emitWValue(arg);
- try func.addMemArg(.i64_load, .{ .offset = 0 + arg.offset(), .alignment = 2 });
- try func.addMemArg(.i64_store, .{ .offset = 0 + result.offset(), .alignment = 2 });
+ const result = try cg.allocStack(ty);
+ try cg.emitWValue(result);
+ try cg.emitWValue(arg);
+ try cg.addMemArg(.i64_load, .{ .offset = 0 + arg.offset(), .alignment = 2 });
+ try cg.addMemArg(.i64_store, .{ .offset = 0 + result.offset(), .alignment = 2 });
- try func.emitWValue(result);
- try func.emitWValue(arg);
- try func.addMemArg(.i64_load, .{ .offset = 8 + arg.offset(), .alignment = 2 });
+ try cg.emitWValue(result);
+ try cg.emitWValue(arg);
+ try cg.addMemArg(.i64_load, .{ .offset = 8 + arg.offset(), .alignment = 2 });
if (float_bits == 80) {
- try func.addImm64(0x8000);
- try func.addTag(.i64_xor);
- try func.addMemArg(.i64_store16, .{ .offset = 8 + result.offset(), .alignment = 2 });
+ try cg.addImm64(0x8000);
+ try cg.addTag(.i64_xor);
+ try cg.addMemArg(.i64_store16, .{ .offset = 8 + result.offset(), .alignment = 2 });
} else {
- try func.addImm64(0x8000000000000000);
- try func.addTag(.i64_xor);
- try func.addMemArg(.i64_store, .{ .offset = 8 + result.offset(), .alignment = 2 });
+ try cg.addImm64(0x8000000000000000);
+ try cg.addTag(.i64_xor);
+ try cg.addMemArg(.i64_store, .{ .offset = 8 + result.offset(), .alignment = 2 });
}
return result;
},
@@ -3019,18 +2974,17 @@ fn floatNeg(func: *CodeGen, ty: Type, arg: WValue) InnerError!WValue {
}
}
-fn airWrapBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
- const pt = func.pt;
- const zcu = pt.zcu;
- const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+fn airWrapBinOp(cg: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
+ const zcu = cg.pt.zcu;
+ const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
- const lhs = try func.resolveInst(bin_op.lhs);
- const rhs = try func.resolveInst(bin_op.rhs);
- const lhs_ty = func.typeOf(bin_op.lhs);
- const rhs_ty = func.typeOf(bin_op.rhs);
+ const lhs = try cg.resolveInst(bin_op.lhs);
+ const rhs = try cg.resolveInst(bin_op.rhs);
+ const lhs_ty = cg.typeOf(bin_op.lhs);
+ const rhs_ty = cg.typeOf(bin_op.rhs);
if (lhs_ty.zigTypeTag(zcu) == .vector or rhs_ty.zigTypeTag(zcu) == .vector) {
- return func.fail("TODO: Implement wrapping arithmetic for vectors", .{});
+ return cg.fail("TODO: Implement wrapping arithmetic for vectors", .{});
}
// For certain operations, such as shifting, the types are different.
@@ -3041,90 +2995,89 @@ fn airWrapBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
const result = switch (op) {
.shr, .shl => result: {
const lhs_wasm_bits = toWasmBits(@intCast(lhs_ty.bitSize(zcu))) orelse {
- return func.fail("TODO: implement '{s}' for types larger than 128 bits", .{@tagName(op)});
+ return cg.fail("TODO: implement '{s}' for types larger than 128 bits", .{@tagName(op)});
};
const rhs_wasm_bits = toWasmBits(@intCast(rhs_ty.bitSize(zcu))).?;
const new_rhs = if (lhs_wasm_bits != rhs_wasm_bits and lhs_wasm_bits != 128)
- try (try func.intcast(rhs, rhs_ty, lhs_ty)).toLocal(func, lhs_ty)
+ try (try cg.intcast(rhs, rhs_ty, lhs_ty)).toLocal(cg, lhs_ty)
else
rhs;
- break :result try func.wrapBinOp(lhs, new_rhs, lhs_ty, op);
+ break :result try cg.wrapBinOp(lhs, new_rhs, lhs_ty, op);
},
- else => try func.wrapBinOp(lhs, rhs, lhs_ty, op),
+ else => try cg.wrapBinOp(lhs, rhs, lhs_ty, op),
};
- return func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
+ return cg.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
}
/// Performs a wrapping binary operation.
/// Asserts rhs is not a stack value when lhs also isn't.
/// NOTE: Leaves the result on the stack when its Type is <= 64 bits
-fn wrapBinOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue {
- const bin_local = try func.binOp(lhs, rhs, ty, op);
- return func.wrapOperand(bin_local, ty);
+fn wrapBinOp(cg: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue {
+ const bin_local = try cg.binOp(lhs, rhs, ty, op);
+ return cg.wrapOperand(bin_local, ty);
}
/// Wraps an operand based on a given type's bitsize.
/// Asserts `Type` is <= 128 bits.
/// NOTE: When the Type is <= 64 bits, leaves the value on top of the stack, if wrapping was needed.
-fn wrapOperand(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue {
- const pt = func.pt;
- const zcu = pt.zcu;
+fn wrapOperand(cg: *CodeGen, operand: WValue, ty: Type) InnerError!WValue {
+ const zcu = cg.pt.zcu;
assert(ty.abiSize(zcu) <= 16);
const int_bits: u16 = @intCast(ty.bitSize(zcu)); // TODO use ty.intInfo(zcu).bits
const wasm_bits = toWasmBits(int_bits) orelse {
- return func.fail("TODO: Implement wrapOperand for bitsize '{d}'", .{int_bits});
+ return cg.fail("TODO: Implement wrapOperand for bitsize '{d}'", .{int_bits});
};
if (wasm_bits == int_bits) return operand;
switch (wasm_bits) {
32 => {
- try func.emitWValue(operand);
+ try cg.emitWValue(operand);
if (ty.isSignedInt(zcu)) {
- try func.addImm32(32 - int_bits);
- try func.addTag(.i32_shl);
- try func.addImm32(32 - int_bits);
- try func.addTag(.i32_shr_s);
+ try cg.addImm32(32 - int_bits);
+ try cg.addTag(.i32_shl);
+ try cg.addImm32(32 - int_bits);
+ try cg.addTag(.i32_shr_s);
} else {
- try func.addImm32(~@as(u32, 0) >> @intCast(32 - int_bits));
- try func.addTag(.i32_and);
+ try cg.addImm32(~@as(u32, 0) >> @intCast(32 - int_bits));
+ try cg.addTag(.i32_and);
}
return .stack;
},
64 => {
- try func.emitWValue(operand);
+ try cg.emitWValue(operand);
if (ty.isSignedInt(zcu)) {
- try func.addImm64(64 - int_bits);
- try func.addTag(.i64_shl);
- try func.addImm64(64 - int_bits);
- try func.addTag(.i64_shr_s);
+ try cg.addImm64(64 - int_bits);
+ try cg.addTag(.i64_shl);
+ try cg.addImm64(64 - int_bits);
+ try cg.addTag(.i64_shr_s);
} else {
- try func.addImm64(~@as(u64, 0) >> @intCast(64 - int_bits));
- try func.addTag(.i64_and);
+ try cg.addImm64(~@as(u64, 0) >> @intCast(64 - int_bits));
+ try cg.addTag(.i64_and);
}
return .stack;
},
128 => {
assert(operand != .stack);
- const result = try func.allocStack(ty);
+ const result = try cg.allocStack(ty);
- try func.emitWValue(result);
- _ = try func.load(operand, Type.u64, 0);
- try func.store(.stack, .stack, Type.u64, result.offset());
+ try cg.emitWValue(result);
+ _ = try cg.load(operand, Type.u64, 0);
+ try cg.store(.stack, .stack, Type.u64, result.offset());
- try func.emitWValue(result);
- _ = try func.load(operand, Type.u64, 8);
+ try cg.emitWValue(result);
+ _ = try cg.load(operand, Type.u64, 8);
if (ty.isSignedInt(zcu)) {
- try func.addImm64(128 - int_bits);
- try func.addTag(.i64_shl);
- try func.addImm64(128 - int_bits);
- try func.addTag(.i64_shr_s);
+ try cg.addImm64(128 - int_bits);
+ try cg.addTag(.i64_shl);
+ try cg.addImm64(128 - int_bits);
+ try cg.addTag(.i64_shr_s);
} else {
- try func.addImm64(~@as(u64, 0) >> @intCast(128 - int_bits));
- try func.addTag(.i64_and);
+ try cg.addImm64(~@as(u64, 0) >> @intCast(128 - int_bits));
+ try cg.addTag(.i64_and);
}
- try func.store(.stack, .stack, Type.u64, result.offset() + 8);
+ try cg.store(.stack, .stack, Type.u64, result.offset() + 8);
return result;
},
@@ -3132,17 +3085,17 @@ fn wrapOperand(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue {
}
}
-fn lowerPtr(func: *CodeGen, ptr_val: InternPool.Index, prev_offset: u64) InnerError!WValue {
- const pt = func.pt;
+fn lowerPtr(cg: *CodeGen, ptr_val: InternPool.Index, prev_offset: u64) InnerError!WValue {
+ const pt = cg.pt;
const zcu = pt.zcu;
const ptr = zcu.intern_pool.indexToKey(ptr_val).ptr;
const offset: u64 = prev_offset + ptr.byte_offset;
return switch (ptr.base_addr) {
- .nav => |nav| return func.lowerNavRef(nav, @intCast(offset)),
- .uav => |uav| return func.lowerUavRef(uav, @intCast(offset)),
- .int => return func.lowerConstant(try pt.intValue(Type.usize, offset), Type.usize),
- .eu_payload => return func.fail("Wasm TODO: lower error union payload pointer", .{}),
- .opt_payload => |opt_ptr| return func.lowerPtr(opt_ptr, offset),
+ .nav => |nav| return .{ .nav_ref = .{ .nav_index = nav, .offset = @intCast(offset) } },
+ .uav => |uav| return .{ .uav_ref = .{ .ip_index = uav.val, .offset = @intCast(offset), .orig_ptr_ty = uav.orig_ty } },
+ .int => return cg.lowerConstant(try pt.intValue(Type.usize, offset), Type.usize),
+ .eu_payload => return cg.fail("Wasm TODO: lower error union payload pointer", .{}),
+ .opt_payload => |opt_ptr| return cg.lowerPtr(opt_ptr, offset),
.field => |field| {
const base_ptr = Value.fromInterned(field.base);
const base_ty = base_ptr.typeOf(zcu).childType(zcu);
@@ -3151,7 +3104,7 @@ fn lowerPtr(func: *CodeGen, ptr_val: InternPool.Index, prev_offset: u64) InnerEr
assert(base_ty.isSlice(zcu));
break :off switch (field.index) {
Value.slice_ptr_index => 0,
- Value.slice_len_index => @divExact(func.target.ptrBitWidth(), 8),
+ Value.slice_len_index => @divExact(cg.target.ptrBitWidth(), 8),
else => unreachable,
};
},
@@ -3177,70 +3130,19 @@ fn lowerPtr(func: *CodeGen, ptr_val: InternPool.Index, prev_offset: u64) InnerEr
},
else => unreachable,
};
- return func.lowerPtr(field.base, offset + field_off);
+ return cg.lowerPtr(field.base, offset + field_off);
},
.arr_elem, .comptime_field, .comptime_alloc => unreachable,
};
}
-fn lowerUavRef(
- func: *CodeGen,
- uav: InternPool.Key.Ptr.BaseAddr.Uav,
- offset: u32,
-) InnerError!WValue {
- const pt = func.pt;
- const zcu = pt.zcu;
- const ty = Type.fromInterned(zcu.intern_pool.typeOf(uav.val));
-
- const is_fn_body = ty.zigTypeTag(zcu) == .@"fn";
- if (!is_fn_body and !ty.hasRuntimeBitsIgnoreComptime(zcu)) {
- return .{ .imm32 = 0xaaaaaaaa };
- }
-
- const decl_align = zcu.intern_pool.indexToKey(uav.orig_ty).ptr_type.flags.alignment;
- const res = try func.bin_file.lowerUav(pt, uav.val, decl_align, func.src_loc);
- const target_sym_index = switch (res) {
- .mcv => |mcv| mcv.load_symbol,
- .fail => |err_msg| {
- func.err_msg = err_msg;
- return error.CodegenFail;
- },
- };
- if (is_fn_body) {
- return .{ .function_index = target_sym_index };
- } else if (offset == 0) {
- return .{ .memory = target_sym_index };
- } else return .{ .memory_offset = .{ .pointer = target_sym_index, .offset = offset } };
-}
-
-fn lowerNavRef(func: *CodeGen, nav_index: InternPool.Nav.Index, offset: u32) InnerError!WValue {
- const pt = func.pt;
- const zcu = pt.zcu;
- const ip = &zcu.intern_pool;
-
- const nav_ty = ip.getNav(nav_index).typeOf(ip);
- if (!ip.isFunctionType(nav_ty) and !Type.fromInterned(nav_ty).hasRuntimeBitsIgnoreComptime(zcu)) {
- return .{ .imm32 = 0xaaaaaaaa };
- }
-
- const atom_index = try func.bin_file.getOrCreateAtomForNav(pt, nav_index);
- const atom = func.bin_file.getAtom(atom_index);
-
- const target_sym_index = @intFromEnum(atom.sym_index);
- if (ip.isFunctionType(nav_ty)) {
- return .{ .function_index = target_sym_index };
- } else if (offset == 0) {
- return .{ .memory = target_sym_index };
- } else return .{ .memory_offset = .{ .pointer = target_sym_index, .offset = offset } };
-}
-
/// Asserts that `isByRef` returns `false` for `ty`.
-fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue {
- const pt = func.pt;
+fn lowerConstant(cg: *CodeGen, val: Value, ty: Type) InnerError!WValue {
+ const pt = cg.pt;
const zcu = pt.zcu;
- assert(!isByRef(ty, pt, func.target.*));
+ assert(!isByRef(ty, zcu, cg.target));
const ip = &zcu.intern_pool;
- if (val.isUndefDeep(zcu)) return func.emitUndefined(ty);
+ if (val.isUndefDeep(zcu)) return cg.emitUndefined(ty);
switch (ip.indexToKey(val.ip_index)) {
.int_type,
@@ -3319,14 +3221,14 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue {
const payload_type = ty.errorUnionPayload(zcu);
if (!payload_type.hasRuntimeBitsIgnoreComptime(zcu)) {
// We use the error type directly as the type.
- return func.lowerConstant(err_val, err_ty);
+ return cg.lowerConstant(err_val, err_ty);
}
- return func.fail("Wasm TODO: lowerConstant error union with non-zero-bit payload type", .{});
+ return cg.fail("Wasm TODO: lowerConstant error union with non-zero-bit payload type", .{});
},
.enum_tag => |enum_tag| {
const int_tag_ty = ip.typeOf(enum_tag.int);
- return func.lowerConstant(Value.fromInterned(enum_tag.int), Type.fromInterned(int_tag_ty));
+ return cg.lowerConstant(Value.fromInterned(enum_tag.int), Type.fromInterned(int_tag_ty));
},
.float => |float| switch (float.storage) {
.f16 => |f16_val| return .{ .imm32 = @as(u16, @bitCast(f16_val)) },
@@ -3334,18 +3236,12 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue {
.f64 => |f64_val| return .{ .float64 = f64_val },
else => unreachable,
},
- .slice => switch (try func.bin_file.lowerUav(pt, val.toIntern(), .none, func.src_loc)) {
- .mcv => |mcv| return .{ .memory = mcv.load_symbol },
- .fail => |err_msg| {
- func.err_msg = err_msg;
- return error.CodegenFail;
- },
- },
- .ptr => return func.lowerPtr(val.toIntern(), 0),
+ .slice => unreachable, // isByRef == true
+ .ptr => return cg.lowerPtr(val.toIntern(), 0),
.opt => if (ty.optionalReprIsPayload(zcu)) {
const pl_ty = ty.optionalChild(zcu);
if (val.optionalValue(zcu)) |payload| {
- return func.lowerConstant(payload, pl_ty);
+ return cg.lowerConstant(payload, pl_ty);
} else {
return .{ .imm32 = 0 };
}
@@ -3353,12 +3249,12 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue {
return .{ .imm32 = @intFromBool(!val.isNull(zcu)) };
},
.aggregate => switch (ip.indexToKey(ty.ip_index)) {
- .array_type => return func.fail("Wasm TODO: LowerConstant for {}", .{ty.fmt(pt)}),
+ .array_type => return cg.fail("Wasm TODO: LowerConstant for {}", .{ty.fmt(pt)}),
.vector_type => {
- assert(determineSimdStoreStrategy(ty, zcu, func.target.*) == .direct);
+ assert(determineSimdStoreStrategy(ty, zcu, cg.target) == .direct);
var buf: [16]u8 = undefined;
val.writeToMemory(pt, &buf) catch unreachable;
- return func.storeSimdImmd(buf);
+ return cg.storeSimdImmd(buf);
},
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
@@ -3372,7 +3268,7 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue {
backing_int_ty,
mem.readInt(u64, &buf, .little),
);
- return func.lowerConstant(int_val, backing_int_ty);
+ return cg.lowerConstant(int_val, backing_int_ty);
},
else => unreachable,
},
@@ -3385,7 +3281,7 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue {
const field_index = zcu.unionTagFieldIndex(union_obj, Value.fromInterned(un.tag)).?;
break :field_ty Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
};
- return func.lowerConstant(Value.fromInterned(un.val), constant_ty);
+ return cg.lowerConstant(Value.fromInterned(un.val), constant_ty);
},
.memoized_call => unreachable,
}
@@ -3393,15 +3289,14 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue {
/// Stores the value as a 128bit-immediate value by storing it inside
/// the list and returning the index into this list as `WValue`.
-fn storeSimdImmd(func: *CodeGen, value: [16]u8) !WValue {
- const index = @as(u32, @intCast(func.simd_immediates.items.len));
- try func.simd_immediates.append(func.gpa, value);
+fn storeSimdImmd(cg: *CodeGen, value: [16]u8) !WValue {
+ const index = @as(u32, @intCast(cg.simd_immediates.items.len));
+ try cg.simd_immediates.append(cg.gpa, value);
return .{ .imm128 = index };
}
-fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue {
- const pt = func.pt;
- const zcu = pt.zcu;
+fn emitUndefined(cg: *CodeGen, ty: Type) InnerError!WValue {
+ const zcu = cg.pt.zcu;
const ip = &zcu.intern_pool;
switch (ty.zigTypeTag(zcu)) {
.bool, .error_set => return .{ .imm32 = 0xaaaaaaaa },
@@ -3410,21 +3305,20 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue {
33...64 => return .{ .imm64 = 0xaaaaaaaaaaaaaaaa },
else => unreachable,
},
- .float => switch (ty.floatBits(func.target.*)) {
+ .float => switch (ty.floatBits(cg.target.*)) {
16 => return .{ .imm32 = 0xaaaaaaaa },
32 => return .{ .float32 = @as(f32, @bitCast(@as(u32, 0xaaaaaaaa))) },
64 => return .{ .float64 = @as(f64, @bitCast(@as(u64, 0xaaaaaaaaaaaaaaaa))) },
else => unreachable,
},
- .pointer => switch (func.arch()) {
+ .pointer => switch (cg.ptr_size) {
.wasm32 => return .{ .imm32 = 0xaaaaaaaa },
.wasm64 => return .{ .imm64 = 0xaaaaaaaaaaaaaaaa },
- else => unreachable,
},
.optional => {
const pl_ty = ty.optionalChild(zcu);
if (ty.optionalReprIsPayload(zcu)) {
- return func.emitUndefined(pl_ty);
+ return cg.emitUndefined(pl_ty);
}
return .{ .imm32 = 0xaaaaaaaa };
},
@@ -3433,26 +3327,25 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue {
},
.@"struct" => {
const packed_struct = zcu.typeToPackedStruct(ty).?;
- return func.emitUndefined(Type.fromInterned(packed_struct.backingIntTypeUnordered(ip)));
+ return cg.emitUndefined(Type.fromInterned(packed_struct.backingIntTypeUnordered(ip)));
},
- else => return func.fail("Wasm TODO: emitUndefined for type: {}\n", .{ty.zigTypeTag(zcu)}),
+ else => return cg.fail("Wasm TODO: emitUndefined for type: {}\n", .{ty.zigTypeTag(zcu)}),
}
}
/// Returns a `Value` as a signed 32 bit value.
/// It's illegal to provide a value with a type that cannot be represented
/// as an integer value.
-fn valueAsI32(func: *const CodeGen, val: Value) i32 {
- const pt = func.pt;
- const zcu = pt.zcu;
+fn valueAsI32(cg: *const CodeGen, val: Value) i32 {
+ const zcu = cg.pt.zcu;
const ip = &zcu.intern_pool;
switch (val.toIntern()) {
.bool_true => return 1,
.bool_false => return 0,
else => return switch (ip.indexToKey(val.ip_index)) {
- .enum_tag => |enum_tag| intIndexAsI32(ip, enum_tag.int, pt),
- .int => |int| intStorageAsI32(int.storage, pt),
+ .enum_tag => |enum_tag| intIndexAsI32(ip, enum_tag.int, zcu),
+ .int => |int| intStorageAsI32(int.storage, zcu),
.ptr => |ptr| {
assert(ptr.base_addr == .int);
return @intCast(ptr.byte_offset);
@@ -3463,12 +3356,11 @@ fn valueAsI32(func: *const CodeGen, val: Value) i32 {
}
}
-fn intIndexAsI32(ip: *const InternPool, int: InternPool.Index, pt: Zcu.PerThread) i32 {
- return intStorageAsI32(ip.indexToKey(int).int.storage, pt);
+fn intIndexAsI32(ip: *const InternPool, int: InternPool.Index, zcu: *const Zcu) i32 {
+ return intStorageAsI32(ip.indexToKey(int).int.storage, zcu);
}
-fn intStorageAsI32(storage: InternPool.Key.Int.Storage, pt: Zcu.PerThread) i32 {
- const zcu = pt.zcu;
+fn intStorageAsI32(storage: InternPool.Key.Int.Storage, zcu: *const Zcu) i32 {
return switch (storage) {
.i64 => |x| @as(i32, @intCast(x)),
.u64 => |x| @as(i32, @bitCast(@as(u32, @intCast(x)))),
@@ -3478,145 +3370,144 @@ fn intStorageAsI32(storage: InternPool.Key.Int.Storage, pt: Zcu.PerThread) i32 {
};
}
-fn airBlock(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
- const extra = func.air.extraData(Air.Block, ty_pl.payload);
- try func.lowerBlock(inst, ty_pl.ty.toType(), @ptrCast(func.air.extra[extra.end..][0..extra.data.body_len]));
+fn airBlock(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
+ const extra = cg.air.extraData(Air.Block, ty_pl.payload);
+ try cg.lowerBlock(inst, ty_pl.ty.toType(), @ptrCast(cg.air.extra[extra.end..][0..extra.data.body_len]));
}
-fn lowerBlock(func: *CodeGen, inst: Air.Inst.Index, block_ty: Type, body: []const Air.Inst.Index) InnerError!void {
- const pt = func.pt;
- const wasm_block_ty = genBlockType(block_ty, pt, func.target.*);
+fn lowerBlock(cg: *CodeGen, inst: Air.Inst.Index, block_ty: Type, body: []const Air.Inst.Index) InnerError!void {
+ const zcu = cg.pt.zcu;
+ const wasm_block_ty = genBlockType(block_ty, zcu, cg.target);
// if wasm_block_ty is non-empty, we create a register to store the temporary value
- const block_result: WValue = if (wasm_block_ty != wasm.block_empty) blk: {
- const ty: Type = if (isByRef(block_ty, pt, func.target.*)) Type.u32 else block_ty;
- break :blk try func.ensureAllocLocal(ty); // make sure it's a clean local as it may never get overwritten
+ const block_result: WValue = if (wasm_block_ty != .empty) blk: {
+ const ty: Type = if (isByRef(block_ty, zcu, cg.target)) Type.u32 else block_ty;
+ break :blk try cg.ensureAllocLocal(ty); // make sure it's a clean local as it may never get overwritten
} else .none;
- try func.startBlock(.block, wasm.block_empty);
+ try cg.startBlock(.block, .empty);
// Here we set the current block idx, so breaks know the depth to jump
// to when breaking out.
- try func.blocks.putNoClobber(func.gpa, inst, .{
- .label = func.block_depth,
+ try cg.blocks.putNoClobber(cg.gpa, inst, .{
+ .label = cg.block_depth,
.value = block_result,
});
- try func.genBody(body);
- try func.endBlock();
+ try cg.genBody(body);
+ try cg.endBlock();
- const liveness = func.liveness.getBlock(inst);
- try func.currentBranch().values.ensureUnusedCapacity(func.gpa, liveness.deaths.len);
+ const liveness = cg.liveness.getBlock(inst);
+ try cg.currentBranch().values.ensureUnusedCapacity(cg.gpa, liveness.deaths.len);
- return func.finishAir(inst, block_result, &.{});
+ return cg.finishAir(inst, block_result, &.{});
}
/// appends a new wasm block to the code section and increases the `block_depth` by 1
-fn startBlock(func: *CodeGen, block_tag: wasm.Opcode, valtype: u8) !void {
- func.block_depth += 1;
- try func.addInst(.{
+fn startBlock(cg: *CodeGen, block_tag: std.wasm.Opcode, block_type: std.wasm.BlockType) !void {
+ cg.block_depth += 1;
+ try cg.addInst(.{
.tag = Mir.Inst.Tag.fromOpcode(block_tag),
- .data = .{ .block_type = valtype },
+ .data = .{ .block_type = block_type },
});
}
/// Ends the current wasm block and decreases the `block_depth` by 1
-fn endBlock(func: *CodeGen) !void {
- try func.addTag(.end);
- func.block_depth -= 1;
+fn endBlock(cg: *CodeGen) !void {
+ try cg.addTag(.end);
+ cg.block_depth -= 1;
}
-fn airLoop(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
- const loop = func.air.extraData(Air.Block, ty_pl.payload);
- const body: []const Air.Inst.Index = @ptrCast(func.air.extra[loop.end..][0..loop.data.body_len]);
+fn airLoop(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
+ const loop = cg.air.extraData(Air.Block, ty_pl.payload);
+ const body: []const Air.Inst.Index = @ptrCast(cg.air.extra[loop.end..][0..loop.data.body_len]);
// result type of loop is always 'noreturn', meaning we can always
// emit the wasm type 'block_empty'.
- try func.startBlock(.loop, wasm.block_empty);
+ try cg.startBlock(.loop, .empty);
- try func.loops.putNoClobber(func.gpa, inst, func.block_depth);
- defer assert(func.loops.remove(inst));
+ try cg.loops.putNoClobber(cg.gpa, inst, cg.block_depth);
+ defer assert(cg.loops.remove(inst));
- try func.genBody(body);
- try func.endBlock();
+ try cg.genBody(body);
+ try cg.endBlock();
- return func.finishAir(inst, .none, &.{});
+ return cg.finishAir(inst, .none, &.{});
}
-fn airCondBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
- const condition = try func.resolveInst(pl_op.operand);
- const extra = func.air.extraData(Air.CondBr, pl_op.payload);
- const then_body: []const Air.Inst.Index = @ptrCast(func.air.extra[extra.end..][0..extra.data.then_body_len]);
- const else_body: []const Air.Inst.Index = @ptrCast(func.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]);
- const liveness_condbr = func.liveness.getCondBr(inst);
+fn airCondBr(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const pl_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
+ const condition = try cg.resolveInst(pl_op.operand);
+ const extra = cg.air.extraData(Air.CondBr, pl_op.payload);
+ const then_body: []const Air.Inst.Index = @ptrCast(cg.air.extra[extra.end..][0..extra.data.then_body_len]);
+ const else_body: []const Air.Inst.Index = @ptrCast(cg.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]);
+ const liveness_condbr = cg.liveness.getCondBr(inst);
// result type is always noreturn, so use `block_empty` as type.
- try func.startBlock(.block, wasm.block_empty);
+ try cg.startBlock(.block, .empty);
// emit the conditional value
- try func.emitWValue(condition);
+ try cg.emitWValue(condition);
// we inserted the block in front of the condition
// so now check if condition matches. If not, break outside this block
// and continue with the then codepath
- try func.addLabel(.br_if, 0);
+ try cg.addLabel(.br_if, 0);
- try func.branches.ensureUnusedCapacity(func.gpa, 2);
+ try cg.branches.ensureUnusedCapacity(cg.gpa, 2);
{
- func.branches.appendAssumeCapacity(.{});
- try func.currentBranch().values.ensureUnusedCapacity(func.gpa, @as(u32, @intCast(liveness_condbr.else_deaths.len)));
+ cg.branches.appendAssumeCapacity(.{});
+ try cg.currentBranch().values.ensureUnusedCapacity(cg.gpa, @as(u32, @intCast(liveness_condbr.else_deaths.len)));
defer {
- var else_stack = func.branches.pop();
- else_stack.deinit(func.gpa);
+ var else_stack = cg.branches.pop();
+ else_stack.deinit(cg.gpa);
}
- try func.genBody(else_body);
- try func.endBlock();
+ try cg.genBody(else_body);
+ try cg.endBlock();
}
// Outer block that matches the condition
{
- func.branches.appendAssumeCapacity(.{});
- try func.currentBranch().values.ensureUnusedCapacity(func.gpa, @as(u32, @intCast(liveness_condbr.then_deaths.len)));
+ cg.branches.appendAssumeCapacity(.{});
+ try cg.currentBranch().values.ensureUnusedCapacity(cg.gpa, @as(u32, @intCast(liveness_condbr.then_deaths.len)));
defer {
- var then_stack = func.branches.pop();
- then_stack.deinit(func.gpa);
+ var then_stack = cg.branches.pop();
+ then_stack.deinit(cg.gpa);
}
- try func.genBody(then_body);
+ try cg.genBody(then_body);
}
- return func.finishAir(inst, .none, &.{});
+ return cg.finishAir(inst, .none, &.{});
}
-fn airCmp(func: *CodeGen, inst: Air.Inst.Index, op: std.math.CompareOperator) InnerError!void {
- const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+fn airCmp(cg: *CodeGen, inst: Air.Inst.Index, op: std.math.CompareOperator) InnerError!void {
+ const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
- const lhs = try func.resolveInst(bin_op.lhs);
- const rhs = try func.resolveInst(bin_op.rhs);
- const operand_ty = func.typeOf(bin_op.lhs);
- const result = try func.cmp(lhs, rhs, operand_ty, op);
- return func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
+ const lhs = try cg.resolveInst(bin_op.lhs);
+ const rhs = try cg.resolveInst(bin_op.rhs);
+ const operand_ty = cg.typeOf(bin_op.lhs);
+ const result = try cg.cmp(lhs, rhs, operand_ty, op);
+ return cg.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
}
/// Compares two operands.
/// Asserts rhs is not a stack value when the lhs isn't a stack value either
/// NOTE: This leaves the result on top of the stack, rather than a new local.
-fn cmp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareOperator) InnerError!WValue {
+fn cmp(cg: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareOperator) InnerError!WValue {
assert(!(lhs != .stack and rhs == .stack));
- const pt = func.pt;
- const zcu = pt.zcu;
+ const zcu = cg.pt.zcu;
if (ty.zigTypeTag(zcu) == .optional and !ty.optionalReprIsPayload(zcu)) {
const payload_ty = ty.optionalChild(zcu);
if (payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
// When we hit this case, we must check the value of optionals
// that are not pointers. This means first checking against non-null for
            // both lhs and rhs, as well as checking that the payloads of lhs and rhs match
- return func.cmpOptionals(lhs, rhs, ty, op);
+ return cg.cmpOptionals(lhs, rhs, ty, op);
}
} else if (ty.isAnyFloat()) {
- return func.cmpFloat(ty, lhs, rhs, op);
- } else if (isByRef(ty, pt, func.target.*)) {
- return func.cmpBigInt(lhs, rhs, ty, op);
+ return cg.cmpFloat(ty, lhs, rhs, op);
+ } else if (isByRef(ty, zcu, cg.target)) {
+ return cg.cmpBigInt(lhs, rhs, ty, op);
}
const signedness: std.builtin.Signedness = blk: {
@@ -3629,11 +3520,11 @@ fn cmp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareO
// ensure that when we compare pointers, we emit
// the true pointer of a stack value, rather than the stack pointer.
- try func.lowerToStack(lhs);
- try func.lowerToStack(rhs);
+ try cg.lowerToStack(lhs);
+ try cg.lowerToStack(rhs);
- const opcode: wasm.Opcode = buildOpcode(.{
- .valtype1 = typeToValtype(ty, pt, func.target.*),
+ const opcode: std.wasm.Opcode = buildOpcode(.{
+ .valtype1 = typeToValtype(ty, zcu, cg.target),
.op = switch (op) {
.lt => .lt,
.lte => .le,
@@ -3644,15 +3535,15 @@ fn cmp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareO
},
.signedness = signedness,
});
- try func.addTag(Mir.Inst.Tag.fromOpcode(opcode));
+ try cg.addTag(Mir.Inst.Tag.fromOpcode(opcode));
return .stack;
}
/// Compares two floats.
/// NOTE: Leaves the result of the comparison on top of the stack.
-fn cmpFloat(func: *CodeGen, ty: Type, lhs: WValue, rhs: WValue, cmp_op: std.math.CompareOperator) InnerError!WValue {
- const float_bits = ty.floatBits(func.target.*);
+fn cmpFloat(cg: *CodeGen, ty: Type, lhs: WValue, rhs: WValue, cmp_op: std.math.CompareOperator) InnerError!WValue {
+ const float_bits = ty.floatBits(cg.target.*);
const op: Op = switch (cmp_op) {
.lt => .lt,
@@ -3665,143 +3556,137 @@ fn cmpFloat(func: *CodeGen, ty: Type, lhs: WValue, rhs: WValue, cmp_op: std.math
switch (float_bits) {
16 => {
- _ = try func.fpext(lhs, Type.f16, Type.f32);
- _ = try func.fpext(rhs, Type.f16, Type.f32);
+ _ = try cg.fpext(lhs, Type.f16, Type.f32);
+ _ = try cg.fpext(rhs, Type.f16, Type.f32);
const opcode = buildOpcode(.{ .op = op, .valtype1 = .f32 });
- try func.addTag(Mir.Inst.Tag.fromOpcode(opcode));
+ try cg.addTag(Mir.Inst.Tag.fromOpcode(opcode));
return .stack;
},
32, 64 => {
- try func.emitWValue(lhs);
- try func.emitWValue(rhs);
- const val_type: wasm.Valtype = if (float_bits == 32) .f32 else .f64;
+ try cg.emitWValue(lhs);
+ try cg.emitWValue(rhs);
+ const val_type: std.wasm.Valtype = if (float_bits == 32) .f32 else .f64;
const opcode = buildOpcode(.{ .op = op, .valtype1 = val_type });
- try func.addTag(Mir.Inst.Tag.fromOpcode(opcode));
+ try cg.addTag(Mir.Inst.Tag.fromOpcode(opcode));
return .stack;
},
80, 128 => {
- var fn_name_buf: [32]u8 = undefined;
- const fn_name = std.fmt.bufPrint(&fn_name_buf, "__{s}{s}f2", .{
- @tagName(op), target_util.compilerRtFloatAbbrev(float_bits),
- }) catch unreachable;
-
- const result = try func.callIntrinsic(fn_name, &.{ ty.ip_index, ty.ip_index }, Type.bool, &.{ lhs, rhs });
- return func.cmp(result, .{ .imm32 = 0 }, Type.i32, cmp_op);
+ const intrinsic = floatCmpIntrinsic(cmp_op, float_bits);
+ const result = try cg.callIntrinsic(intrinsic, &.{ ty.ip_index, ty.ip_index }, Type.bool, &.{ lhs, rhs });
+ return cg.cmp(result, .{ .imm32 = 0 }, Type.i32, cmp_op);
},
else => unreachable,
}
}
-fn airCmpVector(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+fn airCmpVector(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
_ = inst;
- return func.fail("TODO implement airCmpVector for wasm", .{});
+ return cg.fail("TODO implement airCmpVector for wasm", .{});
}
-fn airCmpLtErrorsLen(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
- const operand = try func.resolveInst(un_op);
- const sym_index = try func.bin_file.getGlobalSymbol("__zig_errors_len", null);
- const errors_len: WValue = .{ .memory = @intFromEnum(sym_index) };
+fn airCmpLtErrorsLen(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const un_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
+ const operand = try cg.resolveInst(un_op);
- try func.emitWValue(operand);
- const pt = func.pt;
+ try cg.emitWValue(operand);
+ const pt = cg.pt;
const err_int_ty = try pt.errorIntType();
- const errors_len_val = try func.load(errors_len, err_int_ty, 0);
- const result = try func.cmp(.stack, errors_len_val, err_int_ty, .lt);
+ try cg.addTag(.errors_len);
+ const result = try cg.cmp(.stack, .stack, err_int_ty, .lt);
- return func.finishAir(inst, result, &.{un_op});
+ return cg.finishAir(inst, result, &.{un_op});
}
-fn airBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const zcu = func.pt.zcu;
- const br = func.air.instructions.items(.data)[@intFromEnum(inst)].br;
- const block = func.blocks.get(br.block_inst).?;
+fn airBr(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const zcu = cg.pt.zcu;
+ const br = cg.air.instructions.items(.data)[@intFromEnum(inst)].br;
+ const block = cg.blocks.get(br.block_inst).?;
// if operand has codegen bits we should break with a value
- if (func.typeOf(br.operand).hasRuntimeBitsIgnoreComptime(zcu)) {
- const operand = try func.resolveInst(br.operand);
- try func.lowerToStack(operand);
+ if (cg.typeOf(br.operand).hasRuntimeBitsIgnoreComptime(zcu)) {
+ const operand = try cg.resolveInst(br.operand);
+ try cg.lowerToStack(operand);
if (block.value != .none) {
- try func.addLabel(.local_set, block.value.local.value);
+ try cg.addLocal(.local_set, block.value.local.value);
}
}
// We map every block to its block index.
// We then determine how far we have to jump to it by subtracting it from current block depth
- const idx: u32 = func.block_depth - block.label;
- try func.addLabel(.br, idx);
+ const idx: u32 = cg.block_depth - block.label;
+ try cg.addLabel(.br, idx);
- return func.finishAir(inst, .none, &.{br.operand});
+ return cg.finishAir(inst, .none, &.{br.operand});
}
-fn airRepeat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const repeat = func.air.instructions.items(.data)[@intFromEnum(inst)].repeat;
- const loop_label = func.loops.get(repeat.loop_inst).?;
+fn airRepeat(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const repeat = cg.air.instructions.items(.data)[@intFromEnum(inst)].repeat;
+ const loop_label = cg.loops.get(repeat.loop_inst).?;
- const idx: u32 = func.block_depth - loop_label;
- try func.addLabel(.br, idx);
+ const idx: u32 = cg.block_depth - loop_label;
+ try cg.addLabel(.br, idx);
- return func.finishAir(inst, .none, &.{});
+ return cg.finishAir(inst, .none, &.{});
}
-fn airNot(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+fn airNot(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
- const operand = try func.resolveInst(ty_op.operand);
- const operand_ty = func.typeOf(ty_op.operand);
- const pt = func.pt;
+ const operand = try cg.resolveInst(ty_op.operand);
+ const operand_ty = cg.typeOf(ty_op.operand);
+ const pt = cg.pt;
const zcu = pt.zcu;
const result = result: {
if (operand_ty.zigTypeTag(zcu) == .bool) {
- try func.emitWValue(operand);
- try func.addTag(.i32_eqz);
- const not_tmp = try func.allocLocal(operand_ty);
- try func.addLabel(.local_set, not_tmp.local.value);
+ try cg.emitWValue(operand);
+ try cg.addTag(.i32_eqz);
+ const not_tmp = try cg.allocLocal(operand_ty);
+ try cg.addLocal(.local_set, not_tmp.local.value);
break :result not_tmp;
} else {
const int_info = operand_ty.intInfo(zcu);
const wasm_bits = toWasmBits(int_info.bits) orelse {
- return func.fail("TODO: Implement binary NOT for {}", .{operand_ty.fmt(pt)});
+ return cg.fail("TODO: Implement binary NOT for {}", .{operand_ty.fmt(pt)});
};
switch (wasm_bits) {
32 => {
- try func.emitWValue(operand);
- try func.addImm32(switch (int_info.signedness) {
+ try cg.emitWValue(operand);
+ try cg.addImm32(switch (int_info.signedness) {
.unsigned => ~@as(u32, 0) >> @intCast(32 - int_info.bits),
.signed => ~@as(u32, 0),
});
- try func.addTag(.i32_xor);
+ try cg.addTag(.i32_xor);
break :result .stack;
},
64 => {
- try func.emitWValue(operand);
- try func.addImm64(switch (int_info.signedness) {
+ try cg.emitWValue(operand);
+ try cg.addImm64(switch (int_info.signedness) {
.unsigned => ~@as(u64, 0) >> @intCast(64 - int_info.bits),
.signed => ~@as(u64, 0),
});
- try func.addTag(.i64_xor);
+ try cg.addTag(.i64_xor);
break :result .stack;
},
128 => {
- const ptr = try func.allocStack(operand_ty);
+ const ptr = try cg.allocStack(operand_ty);
- try func.emitWValue(ptr);
- _ = try func.load(operand, Type.u64, 0);
- try func.addImm64(~@as(u64, 0));
- try func.addTag(.i64_xor);
- try func.store(.stack, .stack, Type.u64, ptr.offset());
+ try cg.emitWValue(ptr);
+ _ = try cg.load(operand, Type.u64, 0);
+ try cg.addImm64(~@as(u64, 0));
+ try cg.addTag(.i64_xor);
+ try cg.store(.stack, .stack, Type.u64, ptr.offset());
- try func.emitWValue(ptr);
- _ = try func.load(operand, Type.u64, 8);
- try func.addImm64(switch (int_info.signedness) {
+ try cg.emitWValue(ptr);
+ _ = try cg.load(operand, Type.u64, 8);
+ try cg.addImm64(switch (int_info.signedness) {
.unsigned => ~@as(u64, 0) >> @intCast(128 - int_info.bits),
.signed => ~@as(u64, 0),
});
- try func.addTag(.i64_xor);
- try func.store(.stack, .stack, Type.u64, ptr.offset() + 8);
+ try cg.addTag(.i64_xor);
+ try cg.store(.stack, .stack, Type.u64, ptr.offset() + 8);
break :result ptr;
},
@@ -3809,33 +3694,32 @@ fn airNot(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
}
};
- return func.finishAir(inst, result, &.{ty_op.operand});
+ return cg.finishAir(inst, result, &.{ty_op.operand});
}
-fn airTrap(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- try func.addTag(.@"unreachable");
- return func.finishAir(inst, .none, &.{});
+fn airTrap(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ try cg.addTag(.@"unreachable");
+ return cg.finishAir(inst, .none, &.{});
}
-fn airBreakpoint(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+fn airBreakpoint(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
    // unsupported by wasm itself. Can be implemented once we support DWARF
// for wasm
- try func.addTag(.@"unreachable");
- return func.finishAir(inst, .none, &.{});
+ try cg.addTag(.@"unreachable");
+ return cg.finishAir(inst, .none, &.{});
}
-fn airUnreachable(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- try func.addTag(.@"unreachable");
- return func.finishAir(inst, .none, &.{});
+fn airUnreachable(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ try cg.addTag(.@"unreachable");
+ return cg.finishAir(inst, .none, &.{});
}
-fn airBitcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const pt = func.pt;
- const zcu = pt.zcu;
- const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
- const operand = try func.resolveInst(ty_op.operand);
- const wanted_ty = func.typeOfIndex(inst);
- const given_ty = func.typeOf(ty_op.operand);
+fn airBitcast(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const zcu = cg.pt.zcu;
+ const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+ const operand = try cg.resolveInst(ty_op.operand);
+ const wanted_ty = cg.typeOfIndex(inst);
+ const given_ty = cg.typeOf(ty_op.operand);
const bit_size = given_ty.bitSize(zcu);
const needs_wrapping = (given_ty.isSignedInt(zcu) != wanted_ty.isSignedInt(zcu)) and
@@ -3843,39 +3727,38 @@ fn airBitcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const result = result: {
if (given_ty.isAnyFloat() or wanted_ty.isAnyFloat()) {
- break :result try func.bitcast(wanted_ty, given_ty, operand);
+ break :result try cg.bitcast(wanted_ty, given_ty, operand);
}
- if (isByRef(given_ty, pt, func.target.*) and !isByRef(wanted_ty, pt, func.target.*)) {
- const loaded_memory = try func.load(operand, wanted_ty, 0);
+ if (isByRef(given_ty, zcu, cg.target) and !isByRef(wanted_ty, zcu, cg.target)) {
+ const loaded_memory = try cg.load(operand, wanted_ty, 0);
if (needs_wrapping) {
- break :result try func.wrapOperand(loaded_memory, wanted_ty);
+ break :result try cg.wrapOperand(loaded_memory, wanted_ty);
} else {
break :result loaded_memory;
}
}
- if (!isByRef(given_ty, pt, func.target.*) and isByRef(wanted_ty, pt, func.target.*)) {
- const stack_memory = try func.allocStack(wanted_ty);
- try func.store(stack_memory, operand, given_ty, 0);
+ if (!isByRef(given_ty, zcu, cg.target) and isByRef(wanted_ty, zcu, cg.target)) {
+ const stack_memory = try cg.allocStack(wanted_ty);
+ try cg.store(stack_memory, operand, given_ty, 0);
if (needs_wrapping) {
- break :result try func.wrapOperand(stack_memory, wanted_ty);
+ break :result try cg.wrapOperand(stack_memory, wanted_ty);
} else {
break :result stack_memory;
}
}
if (needs_wrapping) {
- break :result try func.wrapOperand(operand, wanted_ty);
+ break :result try cg.wrapOperand(operand, wanted_ty);
}
- break :result func.reuseOperand(ty_op.operand, operand);
+ break :result cg.reuseOperand(ty_op.operand, operand);
};
- return func.finishAir(inst, result, &.{ty_op.operand});
+ return cg.finishAir(inst, result, &.{ty_op.operand});
}
-fn bitcast(func: *CodeGen, wanted_ty: Type, given_ty: Type, operand: WValue) InnerError!WValue {
- const pt = func.pt;
- const zcu = pt.zcu;
+fn bitcast(cg: *CodeGen, wanted_ty: Type, given_ty: Type, operand: WValue) InnerError!WValue {
+ const zcu = cg.pt.zcu;
// if we bitcast a float to or from an integer we must use the 'reinterpret' instruction
if (!(wanted_ty.isAnyFloat() or given_ty.isAnyFloat())) return operand;
if (wanted_ty.ip_index == .f16_type or given_ty.ip_index == .f16_type) return operand;
@@ -3884,41 +3767,39 @@ fn bitcast(func: *CodeGen, wanted_ty: Type, given_ty: Type, operand: WValue) Inn
const opcode = buildOpcode(.{
.op = .reinterpret,
- .valtype1 = typeToValtype(wanted_ty, pt, func.target.*),
- .valtype2 = typeToValtype(given_ty, pt, func.target.*),
+ .valtype1 = typeToValtype(wanted_ty, zcu, cg.target),
+ .valtype2 = typeToValtype(given_ty, zcu, cg.target),
});
- try func.emitWValue(operand);
- try func.addTag(Mir.Inst.Tag.fromOpcode(opcode));
+ try cg.emitWValue(operand);
+ try cg.addTag(Mir.Inst.Tag.fromOpcode(opcode));
return .stack;
}
-fn airStructFieldPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const pt = func.pt;
- const zcu = pt.zcu;
- const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
- const extra = func.air.extraData(Air.StructField, ty_pl.payload);
+fn airStructFieldPtr(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const zcu = cg.pt.zcu;
+ const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
+ const extra = cg.air.extraData(Air.StructField, ty_pl.payload);
- const struct_ptr = try func.resolveInst(extra.data.struct_operand);
- const struct_ptr_ty = func.typeOf(extra.data.struct_operand);
+ const struct_ptr = try cg.resolveInst(extra.data.struct_operand);
+ const struct_ptr_ty = cg.typeOf(extra.data.struct_operand);
const struct_ty = struct_ptr_ty.childType(zcu);
- const result = try func.structFieldPtr(inst, extra.data.struct_operand, struct_ptr, struct_ptr_ty, struct_ty, extra.data.field_index);
- return func.finishAir(inst, result, &.{extra.data.struct_operand});
+ const result = try cg.structFieldPtr(inst, extra.data.struct_operand, struct_ptr, struct_ptr_ty, struct_ty, extra.data.field_index);
+ return cg.finishAir(inst, result, &.{extra.data.struct_operand});
}
-fn airStructFieldPtrIndex(func: *CodeGen, inst: Air.Inst.Index, index: u32) InnerError!void {
- const pt = func.pt;
- const zcu = pt.zcu;
- const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
- const struct_ptr = try func.resolveInst(ty_op.operand);
- const struct_ptr_ty = func.typeOf(ty_op.operand);
+fn airStructFieldPtrIndex(cg: *CodeGen, inst: Air.Inst.Index, index: u32) InnerError!void {
+ const zcu = cg.pt.zcu;
+ const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+ const struct_ptr = try cg.resolveInst(ty_op.operand);
+ const struct_ptr_ty = cg.typeOf(ty_op.operand);
const struct_ty = struct_ptr_ty.childType(zcu);
- const result = try func.structFieldPtr(inst, ty_op.operand, struct_ptr, struct_ptr_ty, struct_ty, index);
- return func.finishAir(inst, result, &.{ty_op.operand});
+ const result = try cg.structFieldPtr(inst, ty_op.operand, struct_ptr, struct_ptr_ty, struct_ty, index);
+ return cg.finishAir(inst, result, &.{ty_op.operand});
}
fn structFieldPtr(
- func: *CodeGen,
+ cg: *CodeGen,
inst: Air.Inst.Index,
ref: Air.Inst.Ref,
struct_ptr: WValue,
@@ -3926,9 +3807,9 @@ fn structFieldPtr(
struct_ty: Type,
index: u32,
) InnerError!WValue {
- const pt = func.pt;
+ const pt = cg.pt;
const zcu = pt.zcu;
- const result_ty = func.typeOfIndex(inst);
+ const result_ty = cg.typeOfIndex(inst);
const struct_ptr_ty_info = struct_ptr_ty.ptrInfo(zcu);
const offset = switch (struct_ty.containerLayout(zcu)) {
@@ -3947,28 +3828,28 @@ fn structFieldPtr(
};
// save a load and store when we can simply reuse the operand
if (offset == 0) {
- return func.reuseOperand(ref, struct_ptr);
+ return cg.reuseOperand(ref, struct_ptr);
}
switch (struct_ptr) {
.stack_offset => |stack_offset| {
return .{ .stack_offset = .{ .value = stack_offset.value + @as(u32, @intCast(offset)), .references = 1 } };
},
- else => return func.buildPointerOffset(struct_ptr, offset, .new),
+ else => return cg.buildPointerOffset(struct_ptr, offset, .new),
}
}
-fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const pt = func.pt;
+fn airStructFieldVal(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const pt = cg.pt;
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
- const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
- const struct_field = func.air.extraData(Air.StructField, ty_pl.payload).data;
+ const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
+ const struct_field = cg.air.extraData(Air.StructField, ty_pl.payload).data;
- const struct_ty = func.typeOf(struct_field.struct_operand);
- const operand = try func.resolveInst(struct_field.struct_operand);
+ const struct_ty = cg.typeOf(struct_field.struct_operand);
+ const operand = try cg.resolveInst(struct_field.struct_operand);
const field_index = struct_field.field_index;
const field_ty = struct_ty.fieldType(field_index, zcu);
- if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) return func.finishAir(inst, .none, &.{struct_field.struct_operand});
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) return cg.finishAir(inst, .none, &.{struct_field.struct_operand});
const result: WValue = switch (struct_ty.containerLayout(zcu)) {
.@"packed" => switch (struct_ty.zigTypeTag(zcu)) {
@@ -3977,42 +3858,42 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const offset = pt.structPackedFieldBitOffset(packed_struct, field_index);
const backing_ty = Type.fromInterned(packed_struct.backingIntTypeUnordered(ip));
const wasm_bits = toWasmBits(backing_ty.intInfo(zcu).bits) orelse {
- return func.fail("TODO: airStructFieldVal for packed structs larger than 128 bits", .{});
+ return cg.fail("TODO: airStructFieldVal for packed structs larger than 128 bits", .{});
};
const const_wvalue: WValue = if (wasm_bits == 32)
.{ .imm32 = offset }
else if (wasm_bits == 64)
.{ .imm64 = offset }
else
- return func.fail("TODO: airStructFieldVal for packed structs larger than 64 bits", .{});
+ return cg.fail("TODO: airStructFieldVal for packed structs larger than 64 bits", .{});
                // for the first field we don't require any shifting
const shifted_value = if (offset == 0)
operand
else
- try func.binOp(operand, const_wvalue, backing_ty, .shr);
+ try cg.binOp(operand, const_wvalue, backing_ty, .shr);
if (field_ty.zigTypeTag(zcu) == .float) {
const int_type = try pt.intType(.unsigned, @as(u16, @intCast(field_ty.bitSize(zcu))));
- const truncated = try func.trunc(shifted_value, int_type, backing_ty);
- break :result try func.bitcast(field_ty, int_type, truncated);
+ const truncated = try cg.trunc(shifted_value, int_type, backing_ty);
+ break :result try cg.bitcast(field_ty, int_type, truncated);
} else if (field_ty.isPtrAtRuntime(zcu) and packed_struct.field_types.len == 1) {
// In this case we do not have to perform any transformations,
// we can simply reuse the operand.
- break :result func.reuseOperand(struct_field.struct_operand, operand);
+ break :result cg.reuseOperand(struct_field.struct_operand, operand);
} else if (field_ty.isPtrAtRuntime(zcu)) {
const int_type = try pt.intType(.unsigned, @as(u16, @intCast(field_ty.bitSize(zcu))));
- break :result try func.trunc(shifted_value, int_type, backing_ty);
+ break :result try cg.trunc(shifted_value, int_type, backing_ty);
}
- break :result try func.trunc(shifted_value, field_ty, backing_ty);
+ break :result try cg.trunc(shifted_value, field_ty, backing_ty);
},
.@"union" => result: {
- if (isByRef(struct_ty, pt, func.target.*)) {
- if (!isByRef(field_ty, pt, func.target.*)) {
- break :result try func.load(operand, field_ty, 0);
+ if (isByRef(struct_ty, zcu, cg.target)) {
+ if (!isByRef(field_ty, zcu, cg.target)) {
+ break :result try cg.load(operand, field_ty, 0);
} else {
- const new_stack_val = try func.allocStack(field_ty);
- try func.store(new_stack_val, operand, field_ty, 0);
+ const new_stack_val = try cg.allocStack(field_ty);
+ try cg.store(new_stack_val, operand, field_ty, 0);
break :result new_stack_val;
}
}
@@ -4020,45 +3901,45 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const union_int_type = try pt.intType(.unsigned, @as(u16, @intCast(struct_ty.bitSize(zcu))));
if (field_ty.zigTypeTag(zcu) == .float) {
const int_type = try pt.intType(.unsigned, @as(u16, @intCast(field_ty.bitSize(zcu))));
- const truncated = try func.trunc(operand, int_type, union_int_type);
- break :result try func.bitcast(field_ty, int_type, truncated);
+ const truncated = try cg.trunc(operand, int_type, union_int_type);
+ break :result try cg.bitcast(field_ty, int_type, truncated);
} else if (field_ty.isPtrAtRuntime(zcu)) {
const int_type = try pt.intType(.unsigned, @as(u16, @intCast(field_ty.bitSize(zcu))));
- break :result try func.trunc(operand, int_type, union_int_type);
+ break :result try cg.trunc(operand, int_type, union_int_type);
}
- break :result try func.trunc(operand, field_ty, union_int_type);
+ break :result try cg.trunc(operand, field_ty, union_int_type);
},
else => unreachable,
},
else => result: {
const offset = std.math.cast(u32, struct_ty.structFieldOffset(field_index, zcu)) orelse {
- return func.fail("Field type '{}' too big to fit into stack frame", .{field_ty.fmt(pt)});
+ return cg.fail("Field type '{}' too big to fit into stack frame", .{field_ty.fmt(pt)});
};
- if (isByRef(field_ty, pt, func.target.*)) {
+ if (isByRef(field_ty, zcu, cg.target)) {
switch (operand) {
.stack_offset => |stack_offset| {
break :result .{ .stack_offset = .{ .value = stack_offset.value + offset, .references = 1 } };
},
- else => break :result try func.buildPointerOffset(operand, offset, .new),
+ else => break :result try cg.buildPointerOffset(operand, offset, .new),
}
}
- break :result try func.load(operand, field_ty, offset);
+ break :result try cg.load(operand, field_ty, offset);
},
};
- return func.finishAir(inst, result, &.{struct_field.struct_operand});
+ return cg.finishAir(inst, result, &.{struct_field.struct_operand});
}
-fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const pt = func.pt;
+fn airSwitchBr(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const pt = cg.pt;
const zcu = pt.zcu;
// result type is always 'noreturn'
- const blocktype = wasm.block_empty;
- const switch_br = func.air.unwrapSwitch(inst);
- const target = try func.resolveInst(switch_br.operand);
- const target_ty = func.typeOf(switch_br.operand);
- const liveness = try func.liveness.getSwitchBr(func.gpa, inst, switch_br.cases_len + 1);
- defer func.gpa.free(liveness.deaths);
+ const blocktype: std.wasm.BlockType = .empty;
+ const switch_br = cg.air.unwrapSwitch(inst);
+ const target = try cg.resolveInst(switch_br.operand);
+ const target_ty = cg.typeOf(switch_br.operand);
+ const liveness = try cg.liveness.getSwitchBr(cg.gpa, inst, switch_br.cases_len + 1);
+ defer cg.gpa.free(liveness.deaths);
    // a list that maps each case to its values and body, based on the order inside the list.
const CaseValue = union(enum) {
@@ -4068,21 +3949,21 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
var case_list = try std.ArrayList(struct {
values: []const CaseValue,
body: []const Air.Inst.Index,
- }).initCapacity(func.gpa, switch_br.cases_len);
+ }).initCapacity(cg.gpa, switch_br.cases_len);
defer for (case_list.items) |case| {
- func.gpa.free(case.values);
+ cg.gpa.free(case.values);
} else case_list.deinit();
var lowest_maybe: ?i32 = null;
var highest_maybe: ?i32 = null;
var it = switch_br.iterateCases();
while (it.next()) |case| {
- const values = try func.gpa.alloc(CaseValue, case.items.len + case.ranges.len);
- errdefer func.gpa.free(values);
+ const values = try cg.gpa.alloc(CaseValue, case.items.len + case.ranges.len);
+ errdefer cg.gpa.free(values);
for (case.items, 0..) |ref, i| {
- const item_val = (try func.air.value(ref, pt)).?;
- const int_val = func.valueAsI32(item_val);
+ const item_val = (try cg.air.value(ref, pt)).?;
+ const int_val = cg.valueAsI32(item_val);
if (lowest_maybe == null or int_val < lowest_maybe.?) {
lowest_maybe = int_val;
}
@@ -4093,15 +3974,15 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
for (case.ranges, 0..) |range, i| {
- const min_val = (try func.air.value(range[0], pt)).?;
- const int_min_val = func.valueAsI32(min_val);
+ const min_val = (try cg.air.value(range[0], pt)).?;
+ const int_min_val = cg.valueAsI32(min_val);
if (lowest_maybe == null or int_min_val < lowest_maybe.?) {
lowest_maybe = int_min_val;
}
- const max_val = (try func.air.value(range[1], pt)).?;
- const int_max_val = func.valueAsI32(max_val);
+ const max_val = (try cg.air.value(range[1], pt)).?;
+ const int_max_val = cg.valueAsI32(max_val);
if (highest_maybe == null or int_max_val > highest_maybe.?) {
highest_maybe = int_max_val;
@@ -4116,7 +3997,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
case_list.appendAssumeCapacity(.{ .values = values, .body = case.body });
- try func.startBlock(.block, blocktype);
+ try cg.startBlock(.block, blocktype);
}
// When highest and lowest are null, we have no cases and can use a jump table
@@ -4132,7 +4013,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const else_body = it.elseBody();
const has_else_body = else_body.len != 0;
if (has_else_body) {
- try func.startBlock(.block, blocktype);
+ try cg.startBlock(.block, blocktype);
}
if (!is_sparse) {
@@ -4140,25 +4021,25 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// The value 'target' represents the index into the table.
// Each index in the table represents a label to the branch
// to jump to.
- try func.startBlock(.block, blocktype);
- try func.emitWValue(target);
+ try cg.startBlock(.block, blocktype);
+ try cg.emitWValue(target);
if (lowest < 0) {
// since br_table works using indexes, starting from '0', we must ensure all values
            // we put inside, are at least 0.
- try func.addImm32(@bitCast(lowest * -1));
- try func.addTag(.i32_add);
+ try cg.addImm32(@bitCast(lowest * -1));
+ try cg.addTag(.i32_add);
} else if (lowest > 0) {
            // make the index start from 0 by subtracting the lowest value
- try func.addImm32(@bitCast(lowest));
- try func.addTag(.i32_sub);
+ try cg.addImm32(@bitCast(lowest));
+ try cg.addTag(.i32_sub);
}
// Account for default branch so always add '1'
const depth = @as(u32, @intCast(highest - lowest + @intFromBool(has_else_body))) + 1;
const jump_table: Mir.JumpTable = .{ .length = depth };
- const table_extra_index = try func.addExtra(jump_table);
- try func.addInst(.{ .tag = .br_table, .data = .{ .payload = table_extra_index } });
- try func.mir_extra.ensureUnusedCapacity(func.gpa, depth);
+ const table_extra_index = try cg.addExtra(jump_table);
+ try cg.addInst(.{ .tag = .br_table, .data = .{ .payload = table_extra_index } });
+ try cg.mir_extra.ensureUnusedCapacity(cg.gpa, depth);
var value = lowest;
while (value <= highest) : (value += 1) {
// idx represents the branch we jump to
@@ -4179,78 +4060,77 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// by using a jump table for this instead of if-else chains.
break :blk if (has_else_body or target_ty.zigTypeTag(zcu) == .error_set) switch_br.cases_len else unreachable;
};
- func.mir_extra.appendAssumeCapacity(idx);
+ cg.mir_extra.appendAssumeCapacity(idx);
} else if (has_else_body) {
- func.mir_extra.appendAssumeCapacity(switch_br.cases_len); // default branch
+ cg.mir_extra.appendAssumeCapacity(switch_br.cases_len); // default branch
}
- try func.endBlock();
+ try cg.endBlock();
}
- try func.branches.ensureUnusedCapacity(func.gpa, case_list.items.len + @intFromBool(has_else_body));
+ try cg.branches.ensureUnusedCapacity(cg.gpa, case_list.items.len + @intFromBool(has_else_body));
for (case_list.items, 0..) |case, index| {
// when sparse, we use if/else-chain, so emit conditional checks
if (is_sparse) {
// for single value prong we can emit a simple condition
if (case.values.len == 1 and case.values[0] == .singular) {
- const val = try func.lowerConstant(case.values[0].singular.value, target_ty);
+ const val = try cg.lowerConstant(case.values[0].singular.value, target_ty);
// not equal, because we want to jump out of this block if it does not match the condition.
- _ = try func.cmp(target, val, target_ty, .neq);
- try func.addLabel(.br_if, 0);
+ _ = try cg.cmp(target, val, target_ty, .neq);
+ try cg.addLabel(.br_if, 0);
} else {
// in multi-value prongs we must check if any prongs match the target value.
- try func.startBlock(.block, blocktype);
+ try cg.startBlock(.block, blocktype);
for (case.values) |value| {
switch (value) {
.singular => |single_val| {
- const val = try func.lowerConstant(single_val.value, target_ty);
- _ = try func.cmp(target, val, target_ty, .eq);
+ const val = try cg.lowerConstant(single_val.value, target_ty);
+ _ = try cg.cmp(target, val, target_ty, .eq);
},
.range => |range| {
- const min_val = try func.lowerConstant(range.min_value, target_ty);
- const max_val = try func.lowerConstant(range.max_value, target_ty);
+ const min_val = try cg.lowerConstant(range.min_value, target_ty);
+ const max_val = try cg.lowerConstant(range.max_value, target_ty);
- const gte = try func.cmp(target, min_val, target_ty, .gte);
- const lte = try func.cmp(target, max_val, target_ty, .lte);
- _ = try func.binOp(gte, lte, Type.bool, .@"and");
+ const gte = try cg.cmp(target, min_val, target_ty, .gte);
+ const lte = try cg.cmp(target, max_val, target_ty, .lte);
+ _ = try cg.binOp(gte, lte, Type.bool, .@"and");
},
}
- try func.addLabel(.br_if, 0);
+ try cg.addLabel(.br_if, 0);
}
// value did not match any of the prong values
- try func.addLabel(.br, 1);
- try func.endBlock();
+ try cg.addLabel(.br, 1);
+ try cg.endBlock();
}
}
- func.branches.appendAssumeCapacity(.{});
- try func.currentBranch().values.ensureUnusedCapacity(func.gpa, liveness.deaths[index].len);
+ cg.branches.appendAssumeCapacity(.{});
+ try cg.currentBranch().values.ensureUnusedCapacity(cg.gpa, liveness.deaths[index].len);
defer {
- var case_branch = func.branches.pop();
- case_branch.deinit(func.gpa);
+ var case_branch = cg.branches.pop();
+ case_branch.deinit(cg.gpa);
}
- try func.genBody(case.body);
- try func.endBlock();
+ try cg.genBody(case.body);
+ try cg.endBlock();
}
if (has_else_body) {
- func.branches.appendAssumeCapacity(.{});
+ cg.branches.appendAssumeCapacity(.{});
const else_deaths = liveness.deaths.len - 1;
- try func.currentBranch().values.ensureUnusedCapacity(func.gpa, liveness.deaths[else_deaths].len);
+ try cg.currentBranch().values.ensureUnusedCapacity(cg.gpa, liveness.deaths[else_deaths].len);
defer {
- var else_branch = func.branches.pop();
- else_branch.deinit(func.gpa);
+ var else_branch = cg.branches.pop();
+ else_branch.deinit(cg.gpa);
}
- try func.genBody(else_body);
- try func.endBlock();
+ try cg.genBody(else_body);
+ try cg.endBlock();
}
- return func.finishAir(inst, .none, &.{});
+ return cg.finishAir(inst, .none, &.{});
}
-fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerError!void {
- const pt = func.pt;
- const zcu = pt.zcu;
- const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
- const operand = try func.resolveInst(un_op);
- const err_union_ty = func.typeOf(un_op);
+fn airIsErr(cg: *CodeGen, inst: Air.Inst.Index, opcode: std.wasm.Opcode) InnerError!void {
+ const zcu = cg.pt.zcu;
+ const un_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
+ const operand = try cg.resolveInst(un_op);
+ const err_union_ty = cg.typeOf(un_op);
const pl_ty = err_union_ty.errorUnionPayload(zcu);
const result: WValue = result: {
@@ -4262,57 +4142,55 @@ fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerErro
}
}
- try func.emitWValue(operand);
+ try cg.emitWValue(operand);
if (pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
- try func.addMemArg(.i32_load16_u, .{
+ try cg.addMemArg(.i32_load16_u, .{
.offset = operand.offset() + @as(u32, @intCast(errUnionErrorOffset(pl_ty, zcu))),
.alignment = @intCast(Type.anyerror.abiAlignment(zcu).toByteUnits().?),
});
}
// Compare the error value with '0'
- try func.addImm32(0);
- try func.addTag(Mir.Inst.Tag.fromOpcode(opcode));
+ try cg.addImm32(0);
+ try cg.addTag(Mir.Inst.Tag.fromOpcode(opcode));
break :result .stack;
};
- return func.finishAir(inst, result, &.{un_op});
+ return cg.finishAir(inst, result, &.{un_op});
}
-fn airUnwrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool) InnerError!void {
- const pt = func.pt;
- const zcu = pt.zcu;
- const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+fn airUnwrapErrUnionPayload(cg: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool) InnerError!void {
+ const zcu = cg.pt.zcu;
+ const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
- const operand = try func.resolveInst(ty_op.operand);
- const op_ty = func.typeOf(ty_op.operand);
+ const operand = try cg.resolveInst(ty_op.operand);
+ const op_ty = cg.typeOf(ty_op.operand);
const err_ty = if (op_is_ptr) op_ty.childType(zcu) else op_ty;
const payload_ty = err_ty.errorUnionPayload(zcu);
const result: WValue = result: {
if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
if (op_is_ptr) {
- break :result func.reuseOperand(ty_op.operand, operand);
+ break :result cg.reuseOperand(ty_op.operand, operand);
}
break :result .none;
}
const pl_offset = @as(u32, @intCast(errUnionPayloadOffset(payload_ty, zcu)));
- if (op_is_ptr or isByRef(payload_ty, pt, func.target.*)) {
- break :result try func.buildPointerOffset(operand, pl_offset, .new);
+ if (op_is_ptr or isByRef(payload_ty, zcu, cg.target)) {
+ break :result try cg.buildPointerOffset(operand, pl_offset, .new);
}
- break :result try func.load(operand, payload_ty, pl_offset);
+ break :result try cg.load(operand, payload_ty, pl_offset);
};
- return func.finishAir(inst, result, &.{ty_op.operand});
+ return cg.finishAir(inst, result, &.{ty_op.operand});
}
-fn airUnwrapErrUnionError(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool) InnerError!void {
- const pt = func.pt;
- const zcu = pt.zcu;
- const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+fn airUnwrapErrUnionError(cg: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool) InnerError!void {
+ const zcu = cg.pt.zcu;
+ const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
- const operand = try func.resolveInst(ty_op.operand);
- const op_ty = func.typeOf(ty_op.operand);
+ const operand = try cg.resolveInst(ty_op.operand);
+ const op_ty = cg.typeOf(ty_op.operand);
const err_ty = if (op_is_ptr) op_ty.childType(zcu) else op_ty;
const payload_ty = err_ty.errorUnionPayload(zcu);
@@ -4322,104 +4200,101 @@ fn airUnwrapErrUnionError(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool)
}
if (op_is_ptr or !payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
- break :result func.reuseOperand(ty_op.operand, operand);
+ break :result cg.reuseOperand(ty_op.operand, operand);
}
- break :result try func.load(operand, Type.anyerror, @intCast(errUnionErrorOffset(payload_ty, zcu)));
+ break :result try cg.load(operand, Type.anyerror, @intCast(errUnionErrorOffset(payload_ty, zcu)));
};
- return func.finishAir(inst, result, &.{ty_op.operand});
+ return cg.finishAir(inst, result, &.{ty_op.operand});
}
-fn airWrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const zcu = func.pt.zcu;
- const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+fn airWrapErrUnionPayload(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const zcu = cg.pt.zcu;
+ const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
- const operand = try func.resolveInst(ty_op.operand);
- const err_ty = func.typeOfIndex(inst);
+ const operand = try cg.resolveInst(ty_op.operand);
+ const err_ty = cg.typeOfIndex(inst);
- const pl_ty = func.typeOf(ty_op.operand);
+ const pl_ty = cg.typeOf(ty_op.operand);
const result = result: {
if (!pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
- break :result func.reuseOperand(ty_op.operand, operand);
+ break :result cg.reuseOperand(ty_op.operand, operand);
}
- const err_union = try func.allocStack(err_ty);
- const payload_ptr = try func.buildPointerOffset(err_union, @as(u32, @intCast(errUnionPayloadOffset(pl_ty, zcu))), .new);
- try func.store(payload_ptr, operand, pl_ty, 0);
+ const err_union = try cg.allocStack(err_ty);
+ const payload_ptr = try cg.buildPointerOffset(err_union, @as(u32, @intCast(errUnionPayloadOffset(pl_ty, zcu))), .new);
+ try cg.store(payload_ptr, operand, pl_ty, 0);
// ensure we also write '0' to the error part, so any present stack value gets overwritten by it.
- try func.emitWValue(err_union);
- try func.addImm32(0);
+ try cg.emitWValue(err_union);
+ try cg.addImm32(0);
const err_val_offset: u32 = @intCast(errUnionErrorOffset(pl_ty, zcu));
- try func.addMemArg(.i32_store16, .{
+ try cg.addMemArg(.i32_store16, .{
.offset = err_union.offset() + err_val_offset,
.alignment = 2,
});
break :result err_union;
};
- return func.finishAir(inst, result, &.{ty_op.operand});
+ return cg.finishAir(inst, result, &.{ty_op.operand});
}
-fn airWrapErrUnionErr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const pt = func.pt;
- const zcu = pt.zcu;
- const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+fn airWrapErrUnionErr(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const zcu = cg.pt.zcu;
+ const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
- const operand = try func.resolveInst(ty_op.operand);
+ const operand = try cg.resolveInst(ty_op.operand);
const err_ty = ty_op.ty.toType();
const pl_ty = err_ty.errorUnionPayload(zcu);
const result = result: {
if (!pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
- break :result func.reuseOperand(ty_op.operand, operand);
+ break :result cg.reuseOperand(ty_op.operand, operand);
}
- const err_union = try func.allocStack(err_ty);
+ const err_union = try cg.allocStack(err_ty);
// store error value
- try func.store(err_union, operand, Type.anyerror, @intCast(errUnionErrorOffset(pl_ty, zcu)));
+ try cg.store(err_union, operand, Type.anyerror, @intCast(errUnionErrorOffset(pl_ty, zcu)));
// write 'undefined' to the payload
- const payload_ptr = try func.buildPointerOffset(err_union, @as(u32, @intCast(errUnionPayloadOffset(pl_ty, zcu))), .new);
+ const payload_ptr = try cg.buildPointerOffset(err_union, @as(u32, @intCast(errUnionPayloadOffset(pl_ty, zcu))), .new);
const len = @as(u32, @intCast(err_ty.errorUnionPayload(zcu).abiSize(zcu)));
- try func.memset(Type.u8, payload_ptr, .{ .imm32 = len }, .{ .imm32 = 0xaa });
+ try cg.memset(Type.u8, payload_ptr, .{ .imm32 = len }, .{ .imm32 = 0xaa });
break :result err_union;
};
- return func.finishAir(inst, result, &.{ty_op.operand});
+ return cg.finishAir(inst, result, &.{ty_op.operand});
}
-fn airIntcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+fn airIntcast(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const ty = ty_op.ty.toType();
- const operand = try func.resolveInst(ty_op.operand);
- const operand_ty = func.typeOf(ty_op.operand);
- const pt = func.pt;
- const zcu = pt.zcu;
+ const operand = try cg.resolveInst(ty_op.operand);
+ const operand_ty = cg.typeOf(ty_op.operand);
+ const zcu = cg.pt.zcu;
if (ty.zigTypeTag(zcu) == .vector or operand_ty.zigTypeTag(zcu) == .vector) {
- return func.fail("todo Wasm intcast for vectors", .{});
+ return cg.fail("todo Wasm intcast for vectors", .{});
}
if (ty.abiSize(zcu) > 16 or operand_ty.abiSize(zcu) > 16) {
- return func.fail("todo Wasm intcast for bitsize > 128", .{});
+ return cg.fail("todo Wasm intcast for bitsize > 128", .{});
}
const op_bits = toWasmBits(@intCast(operand_ty.bitSize(zcu))).?;
const wanted_bits = toWasmBits(@intCast(ty.bitSize(zcu))).?;
const result = if (op_bits == wanted_bits)
- func.reuseOperand(ty_op.operand, operand)
+ cg.reuseOperand(ty_op.operand, operand)
else
- try func.intcast(operand, operand_ty, ty);
+ try cg.intcast(operand, operand_ty, ty);
- return func.finishAir(inst, result, &.{ty_op.operand});
+ return cg.finishAir(inst, result, &.{ty_op.operand});
}
/// Upcasts or downcasts an integer based on the given and wanted types,
/// and stores the result in a new operand.
/// Asserts type's bitsize <= 128
/// NOTE: May leave the result on the top of the stack.
-fn intcast(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError!WValue {
- const pt = func.pt;
- const zcu = pt.zcu;
+fn intcast(cg: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError!WValue {
+ const zcu = cg.pt.zcu;
const given_bitsize = @as(u16, @intCast(given.bitSize(zcu)));
const wanted_bitsize = @as(u16, @intCast(wanted.bitSize(zcu)));
assert(given_bitsize <= 128);
@@ -4432,470 +4307,456 @@ fn intcast(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro
}
if (op_bits == 64 and wanted_bits == 32) {
- try func.emitWValue(operand);
- try func.addTag(.i32_wrap_i64);
+ try cg.emitWValue(operand);
+ try cg.addTag(.i32_wrap_i64);
return .stack;
} else if (op_bits == 32 and wanted_bits == 64) {
- try func.emitWValue(operand);
- try func.addTag(if (wanted.isSignedInt(zcu)) .i64_extend_i32_s else .i64_extend_i32_u);
+ try cg.emitWValue(operand);
+ try cg.addTag(if (wanted.isSignedInt(zcu)) .i64_extend_i32_s else .i64_extend_i32_u);
return .stack;
} else if (wanted_bits == 128) {
// for 128bit integers we store the integer in the virtual stack, rather than a local
- const stack_ptr = try func.allocStack(wanted);
- try func.emitWValue(stack_ptr);
+ const stack_ptr = try cg.allocStack(wanted);
+ try cg.emitWValue(stack_ptr);
        // for 32-bit integers, we first coerce the value into a 64-bit integer before storing it,
        // meaning fewer store operations are required.
const lhs = if (op_bits == 32) blk: {
const sign_ty = if (wanted.isSignedInt(zcu)) Type.i64 else Type.u64;
- break :blk try (try func.intcast(operand, given, sign_ty)).toLocal(func, sign_ty);
+ break :blk try (try cg.intcast(operand, given, sign_ty)).toLocal(cg, sign_ty);
} else operand;
// store lsb first
- try func.store(.stack, lhs, Type.u64, 0 + stack_ptr.offset());
+ try cg.store(.stack, lhs, Type.u64, 0 + stack_ptr.offset());
        // For signed integers we shift the lsb word right by 63 (64-bit integer minus 1 sign bit) and store the remaining value
if (wanted.isSignedInt(zcu)) {
- try func.emitWValue(stack_ptr);
- const shr = try func.binOp(lhs, .{ .imm64 = 63 }, Type.i64, .shr);
- try func.store(.stack, shr, Type.u64, 8 + stack_ptr.offset());
+ try cg.emitWValue(stack_ptr);
+ const shr = try cg.binOp(lhs, .{ .imm64 = 63 }, Type.i64, .shr);
+ try cg.store(.stack, shr, Type.u64, 8 + stack_ptr.offset());
} else {
// Ensure memory of msb is zero'd
- try func.store(stack_ptr, .{ .imm64 = 0 }, Type.u64, 8);
+ try cg.store(stack_ptr, .{ .imm64 = 0 }, Type.u64, 8);
}
return stack_ptr;
- } else return func.load(operand, wanted, 0);
+ } else return cg.load(operand, wanted, 0);
}
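
For the 128-bit case above, the operand is stored as the low 64-bit word and the high word is either an arithmetic shift by 63 (all sign bits, for signed targets) or zero (for unsigned targets). A minimal sketch of that widening semantics, as a hypothetical standalone helper (little-endian word order, matching wasm linear memory):

fn widen64To128(value: u64, signed: bool) [2]u64 {
    // low word is the operand itself, stored first (lsb)
    const lo = value;
    // high word is the sign-extension word for signed values, zero otherwise
    const hi: u64 = if (signed)
        @as(u64, @bitCast(@as(i64, @bitCast(value)) >> 63))
    else
        0;
    return .{ lo, hi };
}
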
-fn airIsNull(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode, op_kind: enum { value, ptr }) InnerError!void {
- const pt = func.pt;
- const zcu = pt.zcu;
- const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
- const operand = try func.resolveInst(un_op);
+fn airIsNull(cg: *CodeGen, inst: Air.Inst.Index, opcode: std.wasm.Opcode, op_kind: enum { value, ptr }) InnerError!void {
+ const zcu = cg.pt.zcu;
+ const un_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
+ const operand = try cg.resolveInst(un_op);
- const op_ty = func.typeOf(un_op);
+ const op_ty = cg.typeOf(un_op);
const optional_ty = if (op_kind == .ptr) op_ty.childType(zcu) else op_ty;
- const result = try func.isNull(operand, optional_ty, opcode);
- return func.finishAir(inst, result, &.{un_op});
+ const result = try cg.isNull(operand, optional_ty, opcode);
+ return cg.finishAir(inst, result, &.{un_op});
}
/// For a given type and operand, checks if it's considered `null`.
/// NOTE: Leaves the result on the stack
-fn isNull(func: *CodeGen, operand: WValue, optional_ty: Type, opcode: wasm.Opcode) InnerError!WValue {
- const pt = func.pt;
+fn isNull(cg: *CodeGen, operand: WValue, optional_ty: Type, opcode: std.wasm.Opcode) InnerError!WValue {
+ const pt = cg.pt;
const zcu = pt.zcu;
- try func.emitWValue(operand);
+ try cg.emitWValue(operand);
const payload_ty = optional_ty.optionalChild(zcu);
if (!optional_ty.optionalReprIsPayload(zcu)) {
// When payload is zero-bits, we can treat operand as a value, rather than
// a pointer to the stack value
if (payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
const offset = std.math.cast(u32, payload_ty.abiSize(zcu)) orelse {
- return func.fail("Optional type {} too big to fit into stack frame", .{optional_ty.fmt(pt)});
+ return cg.fail("Optional type {} too big to fit into stack frame", .{optional_ty.fmt(pt)});
};
- try func.addMemArg(.i32_load8_u, .{ .offset = operand.offset() + offset, .alignment = 1 });
+ try cg.addMemArg(.i32_load8_u, .{ .offset = operand.offset() + offset, .alignment = 1 });
}
} else if (payload_ty.isSlice(zcu)) {
- switch (func.arch()) {
- .wasm32 => try func.addMemArg(.i32_load, .{ .offset = operand.offset(), .alignment = 4 }),
- .wasm64 => try func.addMemArg(.i64_load, .{ .offset = operand.offset(), .alignment = 8 }),
- else => unreachable,
+ switch (cg.ptr_size) {
+ .wasm32 => try cg.addMemArg(.i32_load, .{ .offset = operand.offset(), .alignment = 4 }),
+ .wasm64 => try cg.addMemArg(.i64_load, .{ .offset = operand.offset(), .alignment = 8 }),
}
}
// Compare the null value with '0'
- try func.addImm32(0);
- try func.addTag(Mir.Inst.Tag.fromOpcode(opcode));
+ try cg.addImm32(0);
+ try cg.addTag(Mir.Inst.Tag.fromOpcode(opcode));
return .stack;
}
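
A small illustration of the layout the null check above assumes for optionals whose representation is not the payload itself: the one-byte non-null flag sits directly after the payload (at the payload's ABI size), and a zero byte means null. This hypothetical helper only restates that load-and-compare in plain Zig:

fn optionalIsNull(opt_bytes: []const u8, payload_abi_size: usize) bool {
    // the flag byte lives right after the payload; zero means the optional is null
    return opt_bytes[payload_abi_size] == 0;
}
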
-fn airOptionalPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const pt = func.pt;
- const zcu = pt.zcu;
- const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
- const opt_ty = func.typeOf(ty_op.operand);
- const payload_ty = func.typeOfIndex(inst);
+fn airOptionalPayload(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const zcu = cg.pt.zcu;
+ const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+ const opt_ty = cg.typeOf(ty_op.operand);
+ const payload_ty = cg.typeOfIndex(inst);
if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
- return func.finishAir(inst, .none, &.{ty_op.operand});
+ return cg.finishAir(inst, .none, &.{ty_op.operand});
}
const result = result: {
- const operand = try func.resolveInst(ty_op.operand);
- if (opt_ty.optionalReprIsPayload(zcu)) break :result func.reuseOperand(ty_op.operand, operand);
+ const operand = try cg.resolveInst(ty_op.operand);
+ if (opt_ty.optionalReprIsPayload(zcu)) break :result cg.reuseOperand(ty_op.operand, operand);
- if (isByRef(payload_ty, pt, func.target.*)) {
- break :result try func.buildPointerOffset(operand, 0, .new);
+ if (isByRef(payload_ty, zcu, cg.target)) {
+ break :result try cg.buildPointerOffset(operand, 0, .new);
}
- break :result try func.load(operand, payload_ty, 0);
+ break :result try cg.load(operand, payload_ty, 0);
};
- return func.finishAir(inst, result, &.{ty_op.operand});
+ return cg.finishAir(inst, result, &.{ty_op.operand});
}
-fn airOptionalPayloadPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const pt = func.pt;
- const zcu = pt.zcu;
- const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
- const operand = try func.resolveInst(ty_op.operand);
- const opt_ty = func.typeOf(ty_op.operand).childType(zcu);
+fn airOptionalPayloadPtr(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const zcu = cg.pt.zcu;
+ const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+ const operand = try cg.resolveInst(ty_op.operand);
+ const opt_ty = cg.typeOf(ty_op.operand).childType(zcu);
const result = result: {
const payload_ty = opt_ty.optionalChild(zcu);
if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu) or opt_ty.optionalReprIsPayload(zcu)) {
- break :result func.reuseOperand(ty_op.operand, operand);
+ break :result cg.reuseOperand(ty_op.operand, operand);
}
- break :result try func.buildPointerOffset(operand, 0, .new);
+ break :result try cg.buildPointerOffset(operand, 0, .new);
};
- return func.finishAir(inst, result, &.{ty_op.operand});
+ return cg.finishAir(inst, result, &.{ty_op.operand});
}
-fn airOptionalPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const pt = func.pt;
+fn airOptionalPayloadPtrSet(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const pt = cg.pt;
const zcu = pt.zcu;
- const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
- const operand = try func.resolveInst(ty_op.operand);
- const opt_ty = func.typeOf(ty_op.operand).childType(zcu);
+ const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+ const operand = try cg.resolveInst(ty_op.operand);
+ const opt_ty = cg.typeOf(ty_op.operand).childType(zcu);
const payload_ty = opt_ty.optionalChild(zcu);
if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
- return func.fail("TODO: Implement OptionalPayloadPtrSet for optional with zero-sized type {}", .{payload_ty.fmtDebug()});
+ return cg.fail("TODO: Implement OptionalPayloadPtrSet for optional with zero-sized type {}", .{payload_ty.fmtDebug()});
}
if (opt_ty.optionalReprIsPayload(zcu)) {
- return func.finishAir(inst, operand, &.{ty_op.operand});
+ return cg.finishAir(inst, operand, &.{ty_op.operand});
}
const offset = std.math.cast(u32, payload_ty.abiSize(zcu)) orelse {
- return func.fail("Optional type {} too big to fit into stack frame", .{opt_ty.fmt(pt)});
+ return cg.fail("Optional type {} too big to fit into stack frame", .{opt_ty.fmt(pt)});
};
- try func.emitWValue(operand);
- try func.addImm32(1);
- try func.addMemArg(.i32_store8, .{ .offset = operand.offset() + offset, .alignment = 1 });
+ try cg.emitWValue(operand);
+ try cg.addImm32(1);
+ try cg.addMemArg(.i32_store8, .{ .offset = operand.offset() + offset, .alignment = 1 });
- const result = try func.buildPointerOffset(operand, 0, .new);
- return func.finishAir(inst, result, &.{ty_op.operand});
+ const result = try cg.buildPointerOffset(operand, 0, .new);
+ return cg.finishAir(inst, result, &.{ty_op.operand});
}
-fn airWrapOptional(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
- const payload_ty = func.typeOf(ty_op.operand);
- const pt = func.pt;
+fn airWrapOptional(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+ const payload_ty = cg.typeOf(ty_op.operand);
+ const pt = cg.pt;
const zcu = pt.zcu;
const result = result: {
if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
- const non_null_bit = try func.allocStack(Type.u1);
- try func.emitWValue(non_null_bit);
- try func.addImm32(1);
- try func.addMemArg(.i32_store8, .{ .offset = non_null_bit.offset(), .alignment = 1 });
+ const non_null_bit = try cg.allocStack(Type.u1);
+ try cg.emitWValue(non_null_bit);
+ try cg.addImm32(1);
+ try cg.addMemArg(.i32_store8, .{ .offset = non_null_bit.offset(), .alignment = 1 });
break :result non_null_bit;
}
- const operand = try func.resolveInst(ty_op.operand);
- const op_ty = func.typeOfIndex(inst);
+ const operand = try cg.resolveInst(ty_op.operand);
+ const op_ty = cg.typeOfIndex(inst);
if (op_ty.optionalReprIsPayload(zcu)) {
- break :result func.reuseOperand(ty_op.operand, operand);
+ break :result cg.reuseOperand(ty_op.operand, operand);
}
const offset = std.math.cast(u32, payload_ty.abiSize(zcu)) orelse {
- return func.fail("Optional type {} too big to fit into stack frame", .{op_ty.fmt(pt)});
+ return cg.fail("Optional type {} too big to fit into stack frame", .{op_ty.fmt(pt)});
};
// Create optional type, set the non-null bit, and store the operand inside the optional type
- const result_ptr = try func.allocStack(op_ty);
- try func.emitWValue(result_ptr);
- try func.addImm32(1);
- try func.addMemArg(.i32_store8, .{ .offset = result_ptr.offset() + offset, .alignment = 1 });
+ const result_ptr = try cg.allocStack(op_ty);
+ try cg.emitWValue(result_ptr);
+ try cg.addImm32(1);
+ try cg.addMemArg(.i32_store8, .{ .offset = result_ptr.offset() + offset, .alignment = 1 });
- const payload_ptr = try func.buildPointerOffset(result_ptr, 0, .new);
- try func.store(payload_ptr, operand, payload_ty, 0);
+ const payload_ptr = try cg.buildPointerOffset(result_ptr, 0, .new);
+ try cg.store(payload_ptr, operand, payload_ty, 0);
break :result result_ptr;
};
- return func.finishAir(inst, result, &.{ty_op.operand});
+ return cg.finishAir(inst, result, &.{ty_op.operand});
}
-fn airSlice(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
- const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data;
+fn airSlice(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
+ const bin_op = cg.air.extraData(Air.Bin, ty_pl.payload).data;
- const lhs = try func.resolveInst(bin_op.lhs);
- const rhs = try func.resolveInst(bin_op.rhs);
- const slice_ty = func.typeOfIndex(inst);
+ const lhs = try cg.resolveInst(bin_op.lhs);
+ const rhs = try cg.resolveInst(bin_op.rhs);
+ const slice_ty = cg.typeOfIndex(inst);
- const slice = try func.allocStack(slice_ty);
- try func.store(slice, lhs, Type.usize, 0);
- try func.store(slice, rhs, Type.usize, func.ptrSize());
+ const slice = try cg.allocStack(slice_ty);
+ try cg.store(slice, lhs, Type.usize, 0);
+ try cg.store(slice, rhs, Type.usize, cg.ptrSize());
- return func.finishAir(inst, slice, &.{ bin_op.lhs, bin_op.rhs });
+ return cg.finishAir(inst, slice, &.{ bin_op.lhs, bin_op.rhs });
}
-fn airSliceLen(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+fn airSliceLen(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
- const operand = try func.resolveInst(ty_op.operand);
- return func.finishAir(inst, try func.sliceLen(operand), &.{ty_op.operand});
+ const operand = try cg.resolveInst(ty_op.operand);
+ return cg.finishAir(inst, try cg.sliceLen(operand), &.{ty_op.operand});
}
-fn airSliceElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const pt = func.pt;
- const zcu = pt.zcu;
- const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+fn airSliceElemVal(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const zcu = cg.pt.zcu;
+ const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
- const slice_ty = func.typeOf(bin_op.lhs);
- const slice = try func.resolveInst(bin_op.lhs);
- const index = try func.resolveInst(bin_op.rhs);
+ const slice_ty = cg.typeOf(bin_op.lhs);
+ const slice = try cg.resolveInst(bin_op.lhs);
+ const index = try cg.resolveInst(bin_op.rhs);
const elem_ty = slice_ty.childType(zcu);
const elem_size = elem_ty.abiSize(zcu);
// load pointer onto stack
- _ = try func.load(slice, Type.usize, 0);
+ _ = try cg.load(slice, Type.usize, 0);
// calculate index into slice
- try func.emitWValue(index);
- try func.addImm32(@intCast(elem_size));
- try func.addTag(.i32_mul);
- try func.addTag(.i32_add);
+ try cg.emitWValue(index);
+ try cg.addImm32(@intCast(elem_size));
+ try cg.addTag(.i32_mul);
+ try cg.addTag(.i32_add);
- const elem_result = if (isByRef(elem_ty, pt, func.target.*))
+ const elem_result = if (isByRef(elem_ty, zcu, cg.target))
.stack
else
- try func.load(.stack, elem_ty, 0);
+ try cg.load(.stack, elem_ty, 0);
- return func.finishAir(inst, elem_result, &.{ bin_op.lhs, bin_op.rhs });
+ return cg.finishAir(inst, elem_result, &.{ bin_op.lhs, bin_op.rhs });
}
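
The element accessors above (and the pointer variants further down) all emit the same address computation: the base pointer plus the index scaled by the element's ABI size. As a hypothetical helper, purely for illustration of the i32_mul/i32_add sequence:

fn elemAddress(base: usize, index: usize, elem_abi_size: usize) usize {
    // element address = base pointer + index * ABI size of one element
    return base + index * elem_abi_size;
}
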
-fn airSliceElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const pt = func.pt;
- const zcu = pt.zcu;
- const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
- const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data;
+fn airSliceElemPtr(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const zcu = cg.pt.zcu;
+ const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
+ const bin_op = cg.air.extraData(Air.Bin, ty_pl.payload).data;
const elem_ty = ty_pl.ty.toType().childType(zcu);
const elem_size = elem_ty.abiSize(zcu);
- const slice = try func.resolveInst(bin_op.lhs);
- const index = try func.resolveInst(bin_op.rhs);
+ const slice = try cg.resolveInst(bin_op.lhs);
+ const index = try cg.resolveInst(bin_op.rhs);
- _ = try func.load(slice, Type.usize, 0);
+ _ = try cg.load(slice, Type.usize, 0);
// calculate index into slice
- try func.emitWValue(index);
- try func.addImm32(@intCast(elem_size));
- try func.addTag(.i32_mul);
- try func.addTag(.i32_add);
+ try cg.emitWValue(index);
+ try cg.addImm32(@intCast(elem_size));
+ try cg.addTag(.i32_mul);
+ try cg.addTag(.i32_add);
- return func.finishAir(inst, .stack, &.{ bin_op.lhs, bin_op.rhs });
+ return cg.finishAir(inst, .stack, &.{ bin_op.lhs, bin_op.rhs });
}
-fn airSlicePtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
- const operand = try func.resolveInst(ty_op.operand);
- return func.finishAir(inst, try func.slicePtr(operand), &.{ty_op.operand});
+fn airSlicePtr(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+ const operand = try cg.resolveInst(ty_op.operand);
+ return cg.finishAir(inst, try cg.slicePtr(operand), &.{ty_op.operand});
}
-fn slicePtr(func: *CodeGen, operand: WValue) InnerError!WValue {
- const ptr = try func.load(operand, Type.usize, 0);
- return ptr.toLocal(func, Type.usize);
+fn slicePtr(cg: *CodeGen, operand: WValue) InnerError!WValue {
+ const ptr = try cg.load(operand, Type.usize, 0);
+ return ptr.toLocal(cg, Type.usize);
}
-fn sliceLen(func: *CodeGen, operand: WValue) InnerError!WValue {
- const len = try func.load(operand, Type.usize, func.ptrSize());
- return len.toLocal(func, Type.usize);
+fn sliceLen(cg: *CodeGen, operand: WValue) InnerError!WValue {
+ const len = try cg.load(operand, Type.usize, cg.ptrSize());
+ return len.toLocal(cg, Type.usize);
}
-fn airTrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+fn airTrunc(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
- const operand = try func.resolveInst(ty_op.operand);
+ const operand = try cg.resolveInst(ty_op.operand);
const wanted_ty: Type = ty_op.ty.toType();
- const op_ty = func.typeOf(ty_op.operand);
- const pt = func.pt;
- const zcu = pt.zcu;
+ const op_ty = cg.typeOf(ty_op.operand);
+ const zcu = cg.pt.zcu;
if (wanted_ty.zigTypeTag(zcu) == .vector or op_ty.zigTypeTag(zcu) == .vector) {
- return func.fail("TODO: trunc for vectors", .{});
+ return cg.fail("TODO: trunc for vectors", .{});
}
const result = if (op_ty.bitSize(zcu) == wanted_ty.bitSize(zcu))
- func.reuseOperand(ty_op.operand, operand)
+ cg.reuseOperand(ty_op.operand, operand)
else
- try func.trunc(operand, wanted_ty, op_ty);
+ try cg.trunc(operand, wanted_ty, op_ty);
- return func.finishAir(inst, result, &.{ty_op.operand});
+ return cg.finishAir(inst, result, &.{ty_op.operand});
}
/// Truncates a given operand to a given type, discarding any overflown bits.
/// NOTE: Resulting value is left on the stack.
-fn trunc(func: *CodeGen, operand: WValue, wanted_ty: Type, given_ty: Type) InnerError!WValue {
- const pt = func.pt;
- const zcu = pt.zcu;
+fn trunc(cg: *CodeGen, operand: WValue, wanted_ty: Type, given_ty: Type) InnerError!WValue {
+ const zcu = cg.pt.zcu;
const given_bits = @as(u16, @intCast(given_ty.bitSize(zcu)));
if (toWasmBits(given_bits) == null) {
- return func.fail("TODO: Implement wasm integer truncation for integer bitsize: {d}", .{given_bits});
+ return cg.fail("TODO: Implement wasm integer truncation for integer bitsize: {d}", .{given_bits});
}
- var result = try func.intcast(operand, given_ty, wanted_ty);
+ var result = try cg.intcast(operand, given_ty, wanted_ty);
const wanted_bits = @as(u16, @intCast(wanted_ty.bitSize(zcu)));
const wasm_bits = toWasmBits(wanted_bits).?;
if (wasm_bits != wanted_bits) {
- result = try func.wrapOperand(result, wanted_ty);
+ result = try cg.wrapOperand(result, wanted_ty);
}
return result;
}
-fn airIntFromBool(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
- const operand = try func.resolveInst(un_op);
- const result = func.reuseOperand(un_op, operand);
+fn airIntFromBool(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const un_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
+ const operand = try cg.resolveInst(un_op);
+ const result = cg.reuseOperand(un_op, operand);
- return func.finishAir(inst, result, &.{un_op});
+ return cg.finishAir(inst, result, &.{un_op});
}
-fn airArrayToSlice(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const pt = func.pt;
- const zcu = pt.zcu;
- const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+fn airArrayToSlice(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const zcu = cg.pt.zcu;
+ const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
- const operand = try func.resolveInst(ty_op.operand);
- const array_ty = func.typeOf(ty_op.operand).childType(zcu);
+ const operand = try cg.resolveInst(ty_op.operand);
+ const array_ty = cg.typeOf(ty_op.operand).childType(zcu);
const slice_ty = ty_op.ty.toType();
// create a slice on the stack
- const slice_local = try func.allocStack(slice_ty);
+ const slice_local = try cg.allocStack(slice_ty);
// store the array ptr in the slice
if (array_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
- try func.store(slice_local, operand, Type.usize, 0);
+ try cg.store(slice_local, operand, Type.usize, 0);
}
// store the length of the array in the slice
const array_len: u32 = @intCast(array_ty.arrayLen(zcu));
- try func.store(slice_local, .{ .imm32 = array_len }, Type.usize, func.ptrSize());
+ try cg.store(slice_local, .{ .imm32 = array_len }, Type.usize, cg.ptrSize());
- return func.finishAir(inst, slice_local, &.{ty_op.operand});
+ return cg.finishAir(inst, slice_local, &.{ty_op.operand});
}
-fn airIntFromPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const pt = func.pt;
- const zcu = pt.zcu;
- const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
- const operand = try func.resolveInst(un_op);
- const ptr_ty = func.typeOf(un_op);
+fn airIntFromPtr(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const zcu = cg.pt.zcu;
+ const un_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
+ const operand = try cg.resolveInst(un_op);
+ const ptr_ty = cg.typeOf(un_op);
const result = if (ptr_ty.isSlice(zcu))
- try func.slicePtr(operand)
+ try cg.slicePtr(operand)
else switch (operand) {
// for stack offset, return a pointer to this offset.
- .stack_offset => try func.buildPointerOffset(operand, 0, .new),
- else => func.reuseOperand(un_op, operand),
+ .stack_offset => try cg.buildPointerOffset(operand, 0, .new),
+ else => cg.reuseOperand(un_op, operand),
};
- return func.finishAir(inst, result, &.{un_op});
+ return cg.finishAir(inst, result, &.{un_op});
}
-fn airPtrElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const pt = func.pt;
- const zcu = pt.zcu;
- const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+fn airPtrElemVal(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const zcu = cg.pt.zcu;
+ const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
- const ptr_ty = func.typeOf(bin_op.lhs);
- const ptr = try func.resolveInst(bin_op.lhs);
- const index = try func.resolveInst(bin_op.rhs);
+ const ptr_ty = cg.typeOf(bin_op.lhs);
+ const ptr = try cg.resolveInst(bin_op.lhs);
+ const index = try cg.resolveInst(bin_op.rhs);
const elem_ty = ptr_ty.childType(zcu);
const elem_size = elem_ty.abiSize(zcu);
// load pointer onto the stack
if (ptr_ty.isSlice(zcu)) {
- _ = try func.load(ptr, Type.usize, 0);
+ _ = try cg.load(ptr, Type.usize, 0);
} else {
- try func.lowerToStack(ptr);
+ try cg.lowerToStack(ptr);
}
// calculate index into slice
- try func.emitWValue(index);
- try func.addImm32(@intCast(elem_size));
- try func.addTag(.i32_mul);
- try func.addTag(.i32_add);
+ try cg.emitWValue(index);
+ try cg.addImm32(@intCast(elem_size));
+ try cg.addTag(.i32_mul);
+ try cg.addTag(.i32_add);
- const elem_result = if (isByRef(elem_ty, pt, func.target.*))
+ const elem_result = if (isByRef(elem_ty, zcu, cg.target))
.stack
else
- try func.load(.stack, elem_ty, 0);
+ try cg.load(.stack, elem_ty, 0);
- return func.finishAir(inst, elem_result, &.{ bin_op.lhs, bin_op.rhs });
+ return cg.finishAir(inst, elem_result, &.{ bin_op.lhs, bin_op.rhs });
}
-fn airPtrElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const pt = func.pt;
- const zcu = pt.zcu;
- const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
- const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data;
+fn airPtrElemPtr(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const zcu = cg.pt.zcu;
+ const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
+ const bin_op = cg.air.extraData(Air.Bin, ty_pl.payload).data;
- const ptr_ty = func.typeOf(bin_op.lhs);
+ const ptr_ty = cg.typeOf(bin_op.lhs);
const elem_ty = ty_pl.ty.toType().childType(zcu);
const elem_size = elem_ty.abiSize(zcu);
- const ptr = try func.resolveInst(bin_op.lhs);
- const index = try func.resolveInst(bin_op.rhs);
+ const ptr = try cg.resolveInst(bin_op.lhs);
+ const index = try cg.resolveInst(bin_op.rhs);
// load pointer onto the stack
if (ptr_ty.isSlice(zcu)) {
- _ = try func.load(ptr, Type.usize, 0);
+ _ = try cg.load(ptr, Type.usize, 0);
} else {
- try func.lowerToStack(ptr);
+ try cg.lowerToStack(ptr);
}
// calculate index into ptr
- try func.emitWValue(index);
- try func.addImm32(@intCast(elem_size));
- try func.addTag(.i32_mul);
- try func.addTag(.i32_add);
+ try cg.emitWValue(index);
+ try cg.addImm32(@intCast(elem_size));
+ try cg.addTag(.i32_mul);
+ try cg.addTag(.i32_add);
- return func.finishAir(inst, .stack, &.{ bin_op.lhs, bin_op.rhs });
+ return cg.finishAir(inst, .stack, &.{ bin_op.lhs, bin_op.rhs });
}
-fn airPtrBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
- const pt = func.pt;
- const zcu = pt.zcu;
- const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
- const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data;
+fn airPtrBinOp(cg: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
+ const zcu = cg.pt.zcu;
+ const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
+ const bin_op = cg.air.extraData(Air.Bin, ty_pl.payload).data;
- const ptr = try func.resolveInst(bin_op.lhs);
- const offset = try func.resolveInst(bin_op.rhs);
- const ptr_ty = func.typeOf(bin_op.lhs);
+ const ptr = try cg.resolveInst(bin_op.lhs);
+ const offset = try cg.resolveInst(bin_op.rhs);
+ const ptr_ty = cg.typeOf(bin_op.lhs);
const pointee_ty = switch (ptr_ty.ptrSize(zcu)) {
.One => ptr_ty.childType(zcu).childType(zcu), // ptr to array, so get array element type
else => ptr_ty.childType(zcu),
};
- const valtype = typeToValtype(Type.usize, pt, func.target.*);
+ const valtype = typeToValtype(Type.usize, zcu, cg.target);
const mul_opcode = buildOpcode(.{ .valtype1 = valtype, .op = .mul });
const bin_opcode = buildOpcode(.{ .valtype1 = valtype, .op = op });
- try func.lowerToStack(ptr);
- try func.emitWValue(offset);
- try func.addImm32(@intCast(pointee_ty.abiSize(zcu)));
- try func.addTag(Mir.Inst.Tag.fromOpcode(mul_opcode));
- try func.addTag(Mir.Inst.Tag.fromOpcode(bin_opcode));
+ try cg.lowerToStack(ptr);
+ try cg.emitWValue(offset);
+ try cg.addImm32(@intCast(pointee_ty.abiSize(zcu)));
+ try cg.addTag(Mir.Inst.Tag.fromOpcode(mul_opcode));
+ try cg.addTag(Mir.Inst.Tag.fromOpcode(bin_opcode));
- return func.finishAir(inst, .stack, &.{ bin_op.lhs, bin_op.rhs });
+ return cg.finishAir(inst, .stack, &.{ bin_op.lhs, bin_op.rhs });
}
-fn airMemset(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void {
- const pt = func.pt;
- const zcu = pt.zcu;
+fn airMemset(cg: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void {
+ const zcu = cg.pt.zcu;
if (safety) {
// TODO if the value is undef, write 0xaa bytes to dest
} else {
// TODO if the value is undef, don't lower this instruction
}
- const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+ const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
- const ptr = try func.resolveInst(bin_op.lhs);
- const ptr_ty = func.typeOf(bin_op.lhs);
- const value = try func.resolveInst(bin_op.rhs);
+ const ptr = try cg.resolveInst(bin_op.lhs);
+ const ptr_ty = cg.typeOf(bin_op.lhs);
+ const value = try cg.resolveInst(bin_op.rhs);
const len = switch (ptr_ty.ptrSize(zcu)) {
- .Slice => try func.sliceLen(ptr),
+ .Slice => try cg.sliceLen(ptr),
.One => @as(WValue, .{ .imm32 = @as(u32, @intCast(ptr_ty.childType(zcu).arrayLen(zcu))) }),
.C, .Many => unreachable,
};
@@ -4905,27 +4766,27 @@ fn airMemset(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void
else
ptr_ty.childType(zcu);
- const dst_ptr = try func.sliceOrArrayPtr(ptr, ptr_ty);
- try func.memset(elem_ty, dst_ptr, len, value);
+ const dst_ptr = try cg.sliceOrArrayPtr(ptr, ptr_ty);
+ try cg.memset(elem_ty, dst_ptr, len, value);
- return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
+ return cg.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
}
/// Sets a region of memory at `ptr` to the value of `value`
/// When the user has enabled the bulk_memory feature, we lower
/// this to wasm's `memory.fill` instruction. When the feature is not present,
/// we implement it manually.
-fn memset(func: *CodeGen, elem_ty: Type, ptr: WValue, len: WValue, value: WValue) InnerError!void {
- const pt = func.pt;
- const abi_size = @as(u32, @intCast(elem_ty.abiSize(pt.zcu)));
+fn memset(cg: *CodeGen, elem_ty: Type, ptr: WValue, len: WValue, value: WValue) InnerError!void {
+ const zcu = cg.pt.zcu;
+ const abi_size = @as(u32, @intCast(elem_ty.abiSize(zcu)));
    // When bulk_memory is enabled, we lower it to wasm's memory.fill instruction.
// If not, we lower it ourselves.
- if (std.Target.wasm.featureSetHas(func.target.cpu.features, .bulk_memory) and abi_size == 1) {
- try func.lowerToStack(ptr);
- try func.emitWValue(value);
- try func.emitWValue(len);
- try func.addExtended(.memory_fill);
+ if (std.Target.wasm.featureSetHas(cg.target.cpu.features, .bulk_memory) and abi_size == 1) {
+ try cg.lowerToStack(ptr);
+ try cg.emitWValue(value);
+ try cg.emitWValue(len);
+ try cg.addExtended(.memory_fill);
return;
}
@@ -4933,100 +4794,95 @@ fn memset(func: *CodeGen, elem_ty: Type, ptr: WValue, len: WValue, value: WValue
.imm32 => |val| .{ .imm32 = val * abi_size },
.imm64 => |val| .{ .imm64 = val * abi_size },
else => if (abi_size != 1) blk: {
- const new_len = try func.ensureAllocLocal(Type.usize);
- try func.emitWValue(len);
- switch (func.arch()) {
+ const new_len = try cg.ensureAllocLocal(Type.usize);
+ try cg.emitWValue(len);
+ switch (cg.ptr_size) {
.wasm32 => {
- try func.emitWValue(.{ .imm32 = abi_size });
- try func.addTag(.i32_mul);
+ try cg.emitWValue(.{ .imm32 = abi_size });
+ try cg.addTag(.i32_mul);
},
.wasm64 => {
- try func.emitWValue(.{ .imm64 = abi_size });
- try func.addTag(.i64_mul);
+ try cg.emitWValue(.{ .imm64 = abi_size });
+ try cg.addTag(.i64_mul);
},
- else => unreachable,
}
- try func.addLabel(.local_set, new_len.local.value);
+ try cg.addLocal(.local_set, new_len.local.value);
break :blk new_len;
} else len,
};
- var end_ptr = try func.allocLocal(Type.usize);
- defer end_ptr.free(func);
- var new_ptr = try func.buildPointerOffset(ptr, 0, .new);
- defer new_ptr.free(func);
+ var end_ptr = try cg.allocLocal(Type.usize);
+ defer end_ptr.free(cg);
+ var new_ptr = try cg.buildPointerOffset(ptr, 0, .new);
+ defer new_ptr.free(cg);
    // compute the end pointer: the loop terminates once the current pointer reaches this address
- try func.lowerToStack(ptr);
- try func.emitWValue(final_len);
- switch (func.arch()) {
- .wasm32 => try func.addTag(.i32_add),
- .wasm64 => try func.addTag(.i64_add),
- else => unreachable,
+ try cg.lowerToStack(ptr);
+ try cg.emitWValue(final_len);
+ switch (cg.ptr_size) {
+ .wasm32 => try cg.addTag(.i32_add),
+ .wasm64 => try cg.addTag(.i64_add),
}
- try func.addLabel(.local_set, end_ptr.local.value);
+ try cg.addLocal(.local_set, end_ptr.local.value);
// outer block to jump to when loop is done
- try func.startBlock(.block, wasm.block_empty);
- try func.startBlock(.loop, wasm.block_empty);
+ try cg.startBlock(.block, .empty);
+ try cg.startBlock(.loop, .empty);
    // check the loop-termination condition
- try func.emitWValue(new_ptr);
- try func.emitWValue(end_ptr);
- switch (func.arch()) {
- .wasm32 => try func.addTag(.i32_eq),
- .wasm64 => try func.addTag(.i64_eq),
- else => unreachable,
+ try cg.emitWValue(new_ptr);
+ try cg.emitWValue(end_ptr);
+ switch (cg.ptr_size) {
+ .wasm32 => try cg.addTag(.i32_eq),
+ .wasm64 => try cg.addTag(.i64_eq),
}
- try func.addLabel(.br_if, 1); // jump out of loop into outer block (finished)
+ try cg.addLabel(.br_if, 1); // jump out of loop into outer block (finished)
// store the value at the current position of the pointer
- try func.store(new_ptr, value, elem_ty, 0);
+ try cg.store(new_ptr, value, elem_ty, 0);
// move the pointer to the next element
- try func.emitWValue(new_ptr);
- switch (func.arch()) {
+ try cg.emitWValue(new_ptr);
+ switch (cg.ptr_size) {
.wasm32 => {
- try func.emitWValue(.{ .imm32 = abi_size });
- try func.addTag(.i32_add);
+ try cg.emitWValue(.{ .imm32 = abi_size });
+ try cg.addTag(.i32_add);
},
.wasm64 => {
- try func.emitWValue(.{ .imm64 = abi_size });
- try func.addTag(.i64_add);
+ try cg.emitWValue(.{ .imm64 = abi_size });
+ try cg.addTag(.i64_add);
},
- else => unreachable,
}
- try func.addLabel(.local_set, new_ptr.local.value);
+ try cg.addLocal(.local_set, new_ptr.local.value);
// end of loop
- try func.addLabel(.br, 0); // jump to start of loop
- try func.endBlock();
- try func.endBlock();
+ try cg.addLabel(.br, 0); // jump to start of loop
+ try cg.endBlock();
+ try cg.endBlock();
}
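
Without the bulk_memory feature the lowering above falls back to a pointer-bumping loop: it computes end = ptr + len * abi_size once, then stores the value and advances by one element until the current pointer equals the end pointer. A minimal sketch of the equivalent loop, using a typed many-pointer for illustration:

fn memsetManual(comptime T: type, dest: [*]T, len: usize, value: T) void {
    var cur = dest;
    const end = dest + len; // end pointer, computed once up front
    while (cur != end) : (cur += 1) {
        cur[0] = value; // store the value at the current position
    }
}
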
-fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const pt = func.pt;
- const zcu = pt.zcu;
- const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+fn airArrayElemVal(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const zcu = cg.pt.zcu;
+ const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
- const array_ty = func.typeOf(bin_op.lhs);
- const array = try func.resolveInst(bin_op.lhs);
- const index = try func.resolveInst(bin_op.rhs);
+ const array_ty = cg.typeOf(bin_op.lhs);
+ const array = try cg.resolveInst(bin_op.lhs);
+ const index = try cg.resolveInst(bin_op.rhs);
const elem_ty = array_ty.childType(zcu);
const elem_size = elem_ty.abiSize(zcu);
- if (isByRef(array_ty, pt, func.target.*)) {
- try func.lowerToStack(array);
- try func.emitWValue(index);
- try func.addImm32(@intCast(elem_size));
- try func.addTag(.i32_mul);
- try func.addTag(.i32_add);
+ if (isByRef(array_ty, zcu, cg.target)) {
+ try cg.lowerToStack(array);
+ try cg.emitWValue(index);
+ try cg.addImm32(@intCast(elem_size));
+ try cg.addTag(.i32_mul);
+ try cg.addTag(.i32_add);
} else {
- std.debug.assert(array_ty.zigTypeTag(zcu) == .vector);
+ assert(array_ty.zigTypeTag(zcu) == .vector);
switch (index) {
inline .imm32, .imm64 => |lane| {
- const opcode: wasm.SimdOpcode = switch (elem_ty.bitSize(zcu)) {
+ const opcode: std.wasm.SimdOpcode = switch (elem_ty.bitSize(zcu)) {
8 => if (elem_ty.isSignedInt(zcu)) .i8x16_extract_lane_s else .i8x16_extract_lane_u,
16 => if (elem_ty.isSignedInt(zcu)) .i16x8_extract_lane_s else .i16x8_extract_lane_u,
32 => if (elem_ty.isInt(zcu)) .i32x4_extract_lane else .f32x4_extract_lane,
@@ -5034,174 +4890,185 @@ fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
else => unreachable,
};
- var operands = [_]u32{ std.wasm.simdOpcode(opcode), @as(u8, @intCast(lane)) };
+ var operands = [_]u32{ @intFromEnum(opcode), @as(u8, @intCast(lane)) };
- try func.emitWValue(array);
+ try cg.emitWValue(array);
- const extra_index = @as(u32, @intCast(func.mir_extra.items.len));
- try func.mir_extra.appendSlice(func.gpa, &operands);
- try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
+ const extra_index = cg.extraLen();
+ try cg.mir_extra.appendSlice(cg.gpa, &operands);
+ try cg.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
- return func.finishAir(inst, .stack, &.{ bin_op.lhs, bin_op.rhs });
+ return cg.finishAir(inst, .stack, &.{ bin_op.lhs, bin_op.rhs });
},
else => {
- const stack_vec = try func.allocStack(array_ty);
- try func.store(stack_vec, array, array_ty, 0);
+ const stack_vec = try cg.allocStack(array_ty);
+ try cg.store(stack_vec, array, array_ty, 0);
// Is a non-unrolled vector (v128)
- try func.lowerToStack(stack_vec);
- try func.emitWValue(index);
- try func.addImm32(@intCast(elem_size));
- try func.addTag(.i32_mul);
- try func.addTag(.i32_add);
+ try cg.lowerToStack(stack_vec);
+ try cg.emitWValue(index);
+ try cg.addImm32(@intCast(elem_size));
+ try cg.addTag(.i32_mul);
+ try cg.addTag(.i32_add);
},
}
}
- const elem_result = if (isByRef(elem_ty, pt, func.target.*))
+ const elem_result = if (isByRef(elem_ty, zcu, cg.target))
.stack
else
- try func.load(.stack, elem_ty, 0);
+ try cg.load(.stack, elem_ty, 0);
- return func.finishAir(inst, elem_result, &.{ bin_op.lhs, bin_op.rhs });
+ return cg.finishAir(inst, elem_result, &.{ bin_op.lhs, bin_op.rhs });
}
-fn airIntFromFloat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const pt = func.pt;
- const zcu = pt.zcu;
- const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+fn airIntFromFloat(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const zcu = cg.pt.zcu;
+ const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
- const operand = try func.resolveInst(ty_op.operand);
- const op_ty = func.typeOf(ty_op.operand);
- const op_bits = op_ty.floatBits(func.target.*);
+ const operand = try cg.resolveInst(ty_op.operand);
+ const op_ty = cg.typeOf(ty_op.operand);
+ const op_bits = op_ty.floatBits(cg.target.*);
- const dest_ty = func.typeOfIndex(inst);
+ const dest_ty = cg.typeOfIndex(inst);
const dest_info = dest_ty.intInfo(zcu);
if (dest_info.bits > 128) {
- return func.fail("TODO: intFromFloat for integers/floats with bitsize {}", .{dest_info.bits});
+ return cg.fail("TODO: intFromFloat for integers/floats with bitsize {}", .{dest_info.bits});
}
if ((op_bits != 32 and op_bits != 64) or dest_info.bits > 64) {
- const dest_bitsize = if (dest_info.bits <= 16) 16 else std.math.ceilPowerOfTwoAssert(u16, dest_info.bits);
-
- var fn_name_buf: [16]u8 = undefined;
- const fn_name = std.fmt.bufPrint(&fn_name_buf, "__fix{s}{s}f{s}i", .{
- switch (dest_info.signedness) {
- .signed => "",
- .unsigned => "uns",
+ const dest_bitsize = if (dest_info.bits <= 32) 32 else std.math.ceilPowerOfTwoAssert(u16, dest_info.bits);
+
+ const intrinsic = switch (dest_info.signedness) {
+ inline .signed, .unsigned => |ct_s| switch (op_bits) {
+ inline 16, 32, 64, 80, 128 => |ct_op_bits| switch (dest_bitsize) {
+ inline 32, 64, 128 => |ct_dest_bits| @field(
+ Mir.Intrinsic,
+ "__fix" ++ switch (ct_s) {
+ .signed => "",
+ .unsigned => "uns",
+ } ++
+ compilerRtFloatAbbrev(ct_op_bits) ++ "f" ++
+ compilerRtIntAbbrev(ct_dest_bits) ++ "i",
+ ),
+ else => unreachable,
+ },
+ else => unreachable,
},
- target_util.compilerRtFloatAbbrev(op_bits),
- target_util.compilerRtIntAbbrev(dest_bitsize),
- }) catch unreachable;
-
- const result = try func.callIntrinsic(fn_name, &.{op_ty.ip_index}, dest_ty, &.{operand});
- return func.finishAir(inst, result, &.{ty_op.operand});
+ };
+ const result = try cg.callIntrinsic(intrinsic, &.{op_ty.ip_index}, dest_ty, &.{operand});
+ return cg.finishAir(inst, result, &.{ty_op.operand});
}
- try func.emitWValue(operand);
+ try cg.emitWValue(operand);
const op = buildOpcode(.{
.op = .trunc,
- .valtype1 = typeToValtype(dest_ty, pt, func.target.*),
- .valtype2 = typeToValtype(op_ty, pt, func.target.*),
+ .valtype1 = typeToValtype(dest_ty, zcu, cg.target),
+ .valtype2 = typeToValtype(op_ty, zcu, cg.target),
.signedness = dest_info.signedness,
});
- try func.addTag(Mir.Inst.Tag.fromOpcode(op));
- const result = try func.wrapOperand(.stack, dest_ty);
- return func.finishAir(inst, result, &.{ty_op.operand});
+ try cg.addTag(Mir.Inst.Tag.fromOpcode(op));
+ const result = try cg.wrapOperand(.stack, dest_ty);
+ return cg.finishAir(inst, result, &.{ty_op.operand});
}
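
Both conversion paths that need a libcall now pick a member of Mir.Intrinsic at comptime instead of formatting the name at runtime: the pieces are the "__fix"/"__float" prefix, an optional "uns"/"un" marker, and the compiler-rt float and integer abbreviations. A standalone sketch of the naming scheme, assuming the usual compiler-rt abbreviations (h/s/d/x/t for f16/f32/f64/f80/f128, and s/d/t for 32/64/128-bit integers):

const std = @import("std");

fn floatAbbrev(comptime bits: u16) []const u8 {
    return switch (bits) {
        16 => "h", 32 => "s", 64 => "d", 80 => "x", 128 => "t",
        else => unreachable,
    };
}

fn intAbbrev(comptime bits: u16) []const u8 {
    return switch (bits) {
        32 => "s", 64 => "d", 128 => "t",
        else => unreachable,
    };
}

test "float to unsigned int libcall name" {
    // f32 -> u128 truncation is served by __fixunssfti in compiler-rt
    const name = comptime "__fix" ++ "uns" ++ floatAbbrev(32) ++ "f" ++ intAbbrev(128) ++ "i";
    try std.testing.expectEqualStrings("__fixunssfti", name);
}
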
-fn airFloatFromInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const pt = func.pt;
- const zcu = pt.zcu;
- const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+fn airFloatFromInt(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const zcu = cg.pt.zcu;
+ const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
- const operand = try func.resolveInst(ty_op.operand);
- const op_ty = func.typeOf(ty_op.operand);
+ const operand = try cg.resolveInst(ty_op.operand);
+ const op_ty = cg.typeOf(ty_op.operand);
const op_info = op_ty.intInfo(zcu);
- const dest_ty = func.typeOfIndex(inst);
- const dest_bits = dest_ty.floatBits(func.target.*);
+ const dest_ty = cg.typeOfIndex(inst);
+ const dest_bits = dest_ty.floatBits(cg.target.*);
if (op_info.bits > 128) {
- return func.fail("TODO: floatFromInt for integers/floats with bitsize {d} bits", .{op_info.bits});
+ return cg.fail("TODO: floatFromInt for integers/floats with bitsize {d} bits", .{op_info.bits});
}
if (op_info.bits > 64 or (dest_bits > 64 or dest_bits < 32)) {
- const op_bitsize = if (op_info.bits <= 16) 16 else std.math.ceilPowerOfTwoAssert(u16, op_info.bits);
-
- var fn_name_buf: [16]u8 = undefined;
- const fn_name = std.fmt.bufPrint(&fn_name_buf, "__float{s}{s}i{s}f", .{
- switch (op_info.signedness) {
- .signed => "",
- .unsigned => "un",
+ const op_bitsize = if (op_info.bits <= 32) 32 else std.math.ceilPowerOfTwoAssert(u16, op_info.bits);
+
+ const intrinsic = switch (op_info.signedness) {
+ inline .signed, .unsigned => |ct_s| switch (op_bitsize) {
+ inline 32, 64, 128 => |ct_int_bits| switch (dest_bits) {
+ inline 16, 32, 64, 80, 128 => |ct_float_bits| @field(
+ Mir.Intrinsic,
+ "__float" ++ switch (ct_s) {
+ .signed => "",
+ .unsigned => "un",
+ } ++
+ compilerRtIntAbbrev(ct_int_bits) ++ "i" ++
+ compilerRtFloatAbbrev(ct_float_bits) ++ "f",
+ ),
+ else => unreachable,
+ },
+ else => unreachable,
},
- target_util.compilerRtIntAbbrev(op_bitsize),
- target_util.compilerRtFloatAbbrev(dest_bits),
- }) catch unreachable;
+ };
- const result = try func.callIntrinsic(fn_name, &.{op_ty.ip_index}, dest_ty, &.{operand});
- return func.finishAir(inst, result, &.{ty_op.operand});
+ const result = try cg.callIntrinsic(intrinsic, &.{op_ty.ip_index}, dest_ty, &.{operand});
+ return cg.finishAir(inst, result, &.{ty_op.operand});
}
- try func.emitWValue(operand);
+ try cg.emitWValue(operand);
const op = buildOpcode(.{
.op = .convert,
- .valtype1 = typeToValtype(dest_ty, pt, func.target.*),
- .valtype2 = typeToValtype(op_ty, pt, func.target.*),
+ .valtype1 = typeToValtype(dest_ty, zcu, cg.target),
+ .valtype2 = typeToValtype(op_ty, zcu, cg.target),
.signedness = op_info.signedness,
});
- try func.addTag(Mir.Inst.Tag.fromOpcode(op));
+ try cg.addTag(Mir.Inst.Tag.fromOpcode(op));
- return func.finishAir(inst, .stack, &.{ty_op.operand});
+ return cg.finishAir(inst, .stack, &.{ty_op.operand});
}
-fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const pt = func.pt;
- const zcu = pt.zcu;
- const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
- const operand = try func.resolveInst(ty_op.operand);
- const ty = func.typeOfIndex(inst);
+fn airSplat(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const zcu = cg.pt.zcu;
+ const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+ const operand = try cg.resolveInst(ty_op.operand);
+ const ty = cg.typeOfIndex(inst);
const elem_ty = ty.childType(zcu);
- if (determineSimdStoreStrategy(ty, zcu, func.target.*) == .direct) blk: {
+ if (determineSimdStoreStrategy(ty, zcu, cg.target) == .direct) blk: {
switch (operand) {
// when the operand lives in the linear memory section, we can directly
            // load and splat the value at once, meaning we do not first have to load
// the scalar value onto the stack.
- .stack_offset, .memory, .memory_offset => {
+ .stack_offset, .nav_ref, .uav_ref => {
const opcode = switch (elem_ty.bitSize(zcu)) {
- 8 => std.wasm.simdOpcode(.v128_load8_splat),
- 16 => std.wasm.simdOpcode(.v128_load16_splat),
- 32 => std.wasm.simdOpcode(.v128_load32_splat),
- 64 => std.wasm.simdOpcode(.v128_load64_splat),
+ 8 => @intFromEnum(std.wasm.SimdOpcode.v128_load8_splat),
+ 16 => @intFromEnum(std.wasm.SimdOpcode.v128_load16_splat),
+ 32 => @intFromEnum(std.wasm.SimdOpcode.v128_load32_splat),
+ 64 => @intFromEnum(std.wasm.SimdOpcode.v128_load64_splat),
else => break :blk, // Cannot make use of simd-instructions
};
- try func.emitWValue(operand);
- // TODO: Add helper functions for simd opcodes
- const extra_index = @as(u32, @intCast(func.mir_extra.items.len));
+ try cg.emitWValue(operand);
+ const extra_index: u32 = cg.extraLen();
                // stored as: opcode, offset, alignment (an opcode followed by its memarg)
- try func.mir_extra.appendSlice(func.gpa, &[_]u32{
+ try cg.mir_extra.appendSlice(cg.gpa, &[_]u32{
opcode,
operand.offset(),
@intCast(elem_ty.abiAlignment(zcu).toByteUnits().?),
});
- try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
- return func.finishAir(inst, .stack, &.{ty_op.operand});
+ try cg.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
+ return cg.finishAir(inst, .stack, &.{ty_op.operand});
},
.local => {
const opcode = switch (elem_ty.bitSize(zcu)) {
- 8 => std.wasm.simdOpcode(.i8x16_splat),
- 16 => std.wasm.simdOpcode(.i16x8_splat),
- 32 => if (elem_ty.isInt(zcu)) std.wasm.simdOpcode(.i32x4_splat) else std.wasm.simdOpcode(.f32x4_splat),
- 64 => if (elem_ty.isInt(zcu)) std.wasm.simdOpcode(.i64x2_splat) else std.wasm.simdOpcode(.f64x2_splat),
+ 8 => @intFromEnum(std.wasm.SimdOpcode.i8x16_splat),
+ 16 => @intFromEnum(std.wasm.SimdOpcode.i16x8_splat),
+ 32 => if (elem_ty.isInt(zcu)) @intFromEnum(std.wasm.SimdOpcode.i32x4_splat) else @intFromEnum(std.wasm.SimdOpcode.f32x4_splat),
+ 64 => if (elem_ty.isInt(zcu)) @intFromEnum(std.wasm.SimdOpcode.i64x2_splat) else @intFromEnum(std.wasm.SimdOpcode.f64x2_splat),
else => break :blk, // Cannot make use of simd-instructions
};
- try func.emitWValue(operand);
- const extra_index = @as(u32, @intCast(func.mir_extra.items.len));
- try func.mir_extra.append(func.gpa, opcode);
- try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
- return func.finishAir(inst, .stack, &.{ty_op.operand});
+ try cg.emitWValue(operand);
+ const extra_index = cg.extraLen();
+ try cg.mir_extra.append(cg.gpa, opcode);
+ try cg.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
+ return cg.finishAir(inst, .stack, &.{ty_op.operand});
},
else => unreachable,
}
@@ -5209,38 +5076,38 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const elem_size = elem_ty.bitSize(zcu);
const vector_len = @as(usize, @intCast(ty.vectorLen(zcu)));
if ((!std.math.isPowerOfTwo(elem_size) or elem_size % 8 != 0) and vector_len > 1) {
- return func.fail("TODO: WebAssembly `@splat` for arbitrary element bitsize {d}", .{elem_size});
+ return cg.fail("TODO: WebAssembly `@splat` for arbitrary element bitsize {d}", .{elem_size});
}
- const result = try func.allocStack(ty);
+ const result = try cg.allocStack(ty);
const elem_byte_size = @as(u32, @intCast(elem_ty.abiSize(zcu)));
var index: usize = 0;
var offset: u32 = 0;
while (index < vector_len) : (index += 1) {
- try func.store(result, operand, elem_ty, offset);
+ try cg.store(result, operand, elem_ty, offset);
offset += elem_byte_size;
}
- return func.finishAir(inst, result, &.{ty_op.operand});
+ return cg.finishAir(inst, result, &.{ty_op.operand});
}
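
When the element size rules out a v128 splat (and after the direct load/local splat paths above), the fallback materializes the vector in the stack frame and stores the scalar once per lane, advancing by the element's ABI size each time. A sketch of the resulting value, written over a plain array:

fn splatFallback(comptime T: type, comptime len: usize, value: T) [len]T {
    var result: [len]T = undefined;
    // one store per lane, mirroring the store/offset loop above
    for (&result) |*lane| lane.* = value;
    return result;
}
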
-fn airSelect(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
- const operand = try func.resolveInst(pl_op.operand);
+fn airSelect(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const pl_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
+ const operand = try cg.resolveInst(pl_op.operand);
_ = operand;
- return func.fail("TODO: Implement wasm airSelect", .{});
+ return cg.fail("TODO: Implement wasm airSelect", .{});
}
-fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const pt = func.pt;
+fn airShuffle(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const pt = cg.pt;
const zcu = pt.zcu;
- const inst_ty = func.typeOfIndex(inst);
- const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
- const extra = func.air.extraData(Air.Shuffle, ty_pl.payload).data;
+ const inst_ty = cg.typeOfIndex(inst);
+ const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
+ const extra = cg.air.extraData(Air.Shuffle, ty_pl.payload).data;
- const a = try func.resolveInst(extra.a);
- const b = try func.resolveInst(extra.b);
+ const a = try cg.resolveInst(extra.a);
+ const b = try cg.resolveInst(extra.b);
const mask = Value.fromInterned(extra.mask);
const mask_len = extra.mask_len;
@@ -5248,26 +5115,26 @@ fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const elem_size = child_ty.abiSize(zcu);
// TODO: One of them could be by ref; handle in loop
- if (isByRef(func.typeOf(extra.a), pt, func.target.*) or isByRef(inst_ty, pt, func.target.*)) {
- const result = try func.allocStack(inst_ty);
+ if (isByRef(cg.typeOf(extra.a), zcu, cg.target) or isByRef(inst_ty, zcu, cg.target)) {
+ const result = try cg.allocStack(inst_ty);
for (0..mask_len) |index| {
const value = (try mask.elemValue(pt, index)).toSignedInt(zcu);
- try func.emitWValue(result);
+ try cg.emitWValue(result);
const loaded = if (value >= 0)
- try func.load(a, child_ty, @as(u32, @intCast(@as(i64, @intCast(elem_size)) * value)))
+ try cg.load(a, child_ty, @as(u32, @intCast(@as(i64, @intCast(elem_size)) * value)))
else
- try func.load(b, child_ty, @as(u32, @intCast(@as(i64, @intCast(elem_size)) * ~value)));
+ try cg.load(b, child_ty, @as(u32, @intCast(@as(i64, @intCast(elem_size)) * ~value)));
- try func.store(.stack, loaded, child_ty, result.stack_offset.value + @as(u32, @intCast(elem_size)) * @as(u32, @intCast(index)));
+ try cg.store(.stack, loaded, child_ty, result.stack_offset.value + @as(u32, @intCast(elem_size)) * @as(u32, @intCast(index)));
}
- return func.finishAir(inst, result, &.{ extra.a, extra.b });
+ return cg.finishAir(inst, result, &.{ extra.a, extra.b });
} else {
var operands = [_]u32{
- std.wasm.simdOpcode(.i8x16_shuffle),
+ @intFromEnum(std.wasm.SimdOpcode.i8x16_shuffle),
} ++ [1]u32{undefined} ** 4;
var lanes = mem.asBytes(operands[1..]);
@@ -5283,91 +5150,91 @@ fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
}
- try func.emitWValue(a);
- try func.emitWValue(b);
+ try cg.emitWValue(a);
+ try cg.emitWValue(b);
- const extra_index = @as(u32, @intCast(func.mir_extra.items.len));
- try func.mir_extra.appendSlice(func.gpa, &operands);
- try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
+ const extra_index = cg.extraLen();
+ try cg.mir_extra.appendSlice(cg.gpa, &operands);
+ try cg.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
- return func.finishAir(inst, .stack, &.{ extra.a, extra.b });
+ return cg.finishAir(inst, .stack, &.{ extra.a, extra.b });
}
}
-fn airReduce(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const reduce = func.air.instructions.items(.data)[@intFromEnum(inst)].reduce;
- const operand = try func.resolveInst(reduce.operand);
+fn airReduce(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const reduce = cg.air.instructions.items(.data)[@intFromEnum(inst)].reduce;
+ const operand = try cg.resolveInst(reduce.operand);
_ = operand;
- return func.fail("TODO: Implement wasm airReduce", .{});
+ return cg.fail("TODO: Implement wasm airReduce", .{});
}
-fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const pt = func.pt;
+fn airAggregateInit(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const pt = cg.pt;
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
- const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
- const result_ty = func.typeOfIndex(inst);
+ const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
+ const result_ty = cg.typeOfIndex(inst);
const len = @as(usize, @intCast(result_ty.arrayLen(zcu)));
- const elements = @as([]const Air.Inst.Ref, @ptrCast(func.air.extra[ty_pl.payload..][0..len]));
+ const elements = @as([]const Air.Inst.Ref, @ptrCast(cg.air.extra[ty_pl.payload..][0..len]));
const result: WValue = result_value: {
switch (result_ty.zigTypeTag(zcu)) {
.array => {
- const result = try func.allocStack(result_ty);
+ const result = try cg.allocStack(result_ty);
const elem_ty = result_ty.childType(zcu);
const elem_size = @as(u32, @intCast(elem_ty.abiSize(zcu)));
const sentinel = if (result_ty.sentinel(zcu)) |sent| blk: {
- break :blk try func.lowerConstant(sent, elem_ty);
+ break :blk try cg.lowerConstant(sent, elem_ty);
} else null;
// When the element type is by reference, we must copy the entire
// value. It is therefore safer to move the offset pointer and store
// each value individually, instead of using store offsets.
- if (isByRef(elem_ty, pt, func.target.*)) {
+ if (isByRef(elem_ty, zcu, cg.target)) {
// copy stack pointer into a temporary local, which is
// moved for each element to store each value in the right position.
- const offset = try func.buildPointerOffset(result, 0, .new);
+ const offset = try cg.buildPointerOffset(result, 0, .new);
for (elements, 0..) |elem, elem_index| {
- const elem_val = try func.resolveInst(elem);
- try func.store(offset, elem_val, elem_ty, 0);
+ const elem_val = try cg.resolveInst(elem);
+ try cg.store(offset, elem_val, elem_ty, 0);
if (elem_index < elements.len - 1 and sentinel == null) {
- _ = try func.buildPointerOffset(offset, elem_size, .modify);
+ _ = try cg.buildPointerOffset(offset, elem_size, .modify);
}
}
if (sentinel) |sent| {
- try func.store(offset, sent, elem_ty, 0);
+ try cg.store(offset, sent, elem_ty, 0);
}
} else {
var offset: u32 = 0;
for (elements) |elem| {
- const elem_val = try func.resolveInst(elem);
- try func.store(result, elem_val, elem_ty, offset);
+ const elem_val = try cg.resolveInst(elem);
+ try cg.store(result, elem_val, elem_ty, offset);
offset += elem_size;
}
if (sentinel) |sent| {
- try func.store(result, sent, elem_ty, offset);
+ try cg.store(result, sent, elem_ty, offset);
}
}
break :result_value result;
},
.@"struct" => switch (result_ty.containerLayout(zcu)) {
.@"packed" => {
- if (isByRef(result_ty, pt, func.target.*)) {
- return func.fail("TODO: airAggregateInit for packed structs larger than 64 bits", .{});
+ if (isByRef(result_ty, zcu, cg.target)) {
+ return cg.fail("TODO: airAggregateInit for packed structs larger than 64 bits", .{});
}
const packed_struct = zcu.typeToPackedStruct(result_ty).?;
const field_types = packed_struct.field_types;
const backing_type = Type.fromInterned(packed_struct.backingIntTypeUnordered(ip));
// ensure the result is zero'd
- const result = try func.allocLocal(backing_type);
+ const result = try cg.allocLocal(backing_type);
if (backing_type.bitSize(zcu) <= 32)
- try func.addImm32(0)
+ try cg.addImm32(0)
else
- try func.addImm64(0);
- try func.addLabel(.local_set, result.local.value);
+ try cg.addImm64(0);
+ try cg.addLocal(.local_set, result.local.value);
var current_bit: u16 = 0;
for (elements, 0..) |elem, elem_index| {
@@ -5379,46 +5246,46 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
else
.{ .imm64 = current_bit };
- const value = try func.resolveInst(elem);
+ const value = try cg.resolveInst(elem);
const value_bit_size: u16 = @intCast(field_ty.bitSize(zcu));
const int_ty = try pt.intType(.unsigned, value_bit_size);
// load our current result on stack so we can perform all transformations
                    // using only stack values, saving the cost of loads and stores.
- try func.emitWValue(result);
- const bitcasted = try func.bitcast(int_ty, field_ty, value);
- const extended_val = try func.intcast(bitcasted, int_ty, backing_type);
+ try cg.emitWValue(result);
+ const bitcasted = try cg.bitcast(int_ty, field_ty, value);
+ const extended_val = try cg.intcast(bitcasted, int_ty, backing_type);
// no need to shift any values when the current offset is 0
const shifted = if (current_bit != 0) shifted: {
- break :shifted try func.binOp(extended_val, shift_val, backing_type, .shl);
+ break :shifted try cg.binOp(extended_val, shift_val, backing_type, .shl);
} else extended_val;
// we ignore the result as we keep it on the stack to assign it directly to `result`
- _ = try func.binOp(.stack, shifted, backing_type, .@"or");
- try func.addLabel(.local_set, result.local.value);
+ _ = try cg.binOp(.stack, shifted, backing_type, .@"or");
+ try cg.addLocal(.local_set, result.local.value);
current_bit += value_bit_size;
}
break :result_value result;
},
else => {
- const result = try func.allocStack(result_ty);
- const offset = try func.buildPointerOffset(result, 0, .new); // pointer to offset
+ const result = try cg.allocStack(result_ty);
+ const offset = try cg.buildPointerOffset(result, 0, .new); // pointer to offset
var prev_field_offset: u64 = 0;
for (elements, 0..) |elem, elem_index| {
if (try result_ty.structFieldValueComptime(pt, elem_index) != null) continue;
const elem_ty = result_ty.fieldType(elem_index, zcu);
const field_offset = result_ty.structFieldOffset(elem_index, zcu);
- _ = try func.buildPointerOffset(offset, @intCast(field_offset - prev_field_offset), .modify);
+ _ = try cg.buildPointerOffset(offset, @intCast(field_offset - prev_field_offset), .modify);
prev_field_offset = field_offset;
- const value = try func.resolveInst(elem);
- try func.store(offset, value, elem_ty, 0);
+ const value = try cg.resolveInst(elem);
+ try cg.store(offset, value, elem_ty, 0);
}
break :result_value result;
},
},
- .vector => return func.fail("TODO: Wasm backend: implement airAggregateInit for vectors", .{}),
+ .vector => return cg.fail("TODO: Wasm backend: implement airAggregateInit for vectors", .{}),
else => unreachable,
}
};
@@ -5426,22 +5293,22 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
if (elements.len <= Liveness.bpi - 1) {
var buf = [1]Air.Inst.Ref{.none} ** (Liveness.bpi - 1);
@memcpy(buf[0..elements.len], elements);
- return func.finishAir(inst, result, &buf);
+ return cg.finishAir(inst, result, &buf);
}
- var bt = try func.iterateBigTomb(inst, elements.len);
+ var bt = try cg.iterateBigTomb(inst, elements.len);
for (elements) |arg| bt.feed(arg);
return bt.finishAir(result);
}
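
// Illustrative sketch (not part of this patch): the packed-struct branch of
// airAggregateInit above zero-initializes a backing integer, then widens each
// field, shifts it to its running bit offset, and ORs it into the backing
// value. A plain-Zig model of that arithmetic, assuming a hypothetical layout
// of two u8 fields packed into a u16 backing integer:
const std = @import("std");

fn packTwoFields(first: u8, second: u8) u16 {
    var backing: u16 = 0; // the zero-initialized backing integer
    backing |= @as(u16, first); // first field lands at bit offset 0, no shift needed
    backing |= @as(u16, second) << 8; // second field shifted to its offset, then OR'd in
    return backing;
}

test "pack fields into a backing integer" {
    try std.testing.expectEqual(@as(u16, 0xBBAA), packTwoFields(0xAA, 0xBB));
}
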
-fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const pt = func.pt;
+fn airUnionInit(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const pt = cg.pt;
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
- const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
- const extra = func.air.extraData(Air.UnionInit, ty_pl.payload).data;
+ const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
+ const extra = cg.air.extraData(Air.UnionInit, ty_pl.payload).data;
const result = result: {
- const union_ty = func.typeOfIndex(inst);
+ const union_ty = cg.typeOfIndex(inst);
const layout = union_ty.unionGetLayout(zcu);
const union_obj = zcu.typeToUnion(union_ty).?;
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[extra.field_index]);
@@ -5451,34 +5318,34 @@ fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const tag_ty = union_ty.unionTagTypeHypothetical(zcu);
const enum_field_index = tag_ty.enumFieldIndex(field_name, zcu).?;
const tag_val = try pt.enumValueFieldIndex(tag_ty, enum_field_index);
- break :blk try func.lowerConstant(tag_val, tag_ty);
+ break :blk try cg.lowerConstant(tag_val, tag_ty);
};
if (layout.payload_size == 0) {
if (layout.tag_size == 0) {
break :result .none;
}
- assert(!isByRef(union_ty, pt, func.target.*));
+ assert(!isByRef(union_ty, zcu, cg.target));
break :result tag_int;
}
- if (isByRef(union_ty, pt, func.target.*)) {
- const result_ptr = try func.allocStack(union_ty);
- const payload = try func.resolveInst(extra.init);
+ if (isByRef(union_ty, zcu, cg.target)) {
+ const result_ptr = try cg.allocStack(union_ty);
+ const payload = try cg.resolveInst(extra.init);
if (layout.tag_align.compare(.gte, layout.payload_align)) {
- if (isByRef(field_ty, pt, func.target.*)) {
- const payload_ptr = try func.buildPointerOffset(result_ptr, layout.tag_size, .new);
- try func.store(payload_ptr, payload, field_ty, 0);
+ if (isByRef(field_ty, zcu, cg.target)) {
+ const payload_ptr = try cg.buildPointerOffset(result_ptr, layout.tag_size, .new);
+ try cg.store(payload_ptr, payload, field_ty, 0);
} else {
- try func.store(result_ptr, payload, field_ty, @intCast(layout.tag_size));
+ try cg.store(result_ptr, payload, field_ty, @intCast(layout.tag_size));
}
if (layout.tag_size > 0) {
- try func.store(result_ptr, tag_int, Type.fromInterned(union_obj.enum_tag_ty), 0);
+ try cg.store(result_ptr, tag_int, Type.fromInterned(union_obj.enum_tag_ty), 0);
}
} else {
- try func.store(result_ptr, payload, field_ty, 0);
+ try cg.store(result_ptr, payload, field_ty, 0);
if (layout.tag_size > 0) {
- try func.store(
+ try cg.store(
result_ptr,
tag_int,
Type.fromInterned(union_obj.enum_tag_ty),
@@ -5488,138 +5355,136 @@ fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
break :result result_ptr;
} else {
- const operand = try func.resolveInst(extra.init);
+ const operand = try cg.resolveInst(extra.init);
const union_int_type = try pt.intType(.unsigned, @as(u16, @intCast(union_ty.bitSize(zcu))));
if (field_ty.zigTypeTag(zcu) == .float) {
const int_type = try pt.intType(.unsigned, @intCast(field_ty.bitSize(zcu)));
- const bitcasted = try func.bitcast(field_ty, int_type, operand);
- break :result try func.trunc(bitcasted, int_type, union_int_type);
+ const bitcasted = try cg.bitcast(field_ty, int_type, operand);
+ break :result try cg.trunc(bitcasted, int_type, union_int_type);
} else if (field_ty.isPtrAtRuntime(zcu)) {
const int_type = try pt.intType(.unsigned, @intCast(field_ty.bitSize(zcu)));
- break :result try func.intcast(operand, int_type, union_int_type);
+ break :result try cg.intcast(operand, int_type, union_int_type);
}
- break :result try func.intcast(operand, field_ty, union_int_type);
+ break :result try cg.intcast(operand, field_ty, union_int_type);
}
};
- return func.finishAir(inst, result, &.{extra.init});
+ return cg.finishAir(inst, result, &.{extra.init});
}
-fn airPrefetch(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const prefetch = func.air.instructions.items(.data)[@intFromEnum(inst)].prefetch;
- return func.finishAir(inst, .none, &.{prefetch.ptr});
+fn airPrefetch(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const prefetch = cg.air.instructions.items(.data)[@intFromEnum(inst)].prefetch;
+ return cg.finishAir(inst, .none, &.{prefetch.ptr});
}
-fn airWasmMemorySize(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
+fn airWasmMemorySize(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const pl_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
- try func.addLabel(.memory_size, pl_op.payload);
- return func.finishAir(inst, .stack, &.{pl_op.operand});
+ try cg.addLabel(.memory_size, pl_op.payload);
+ return cg.finishAir(inst, .stack, &.{pl_op.operand});
}
-fn airWasmMemoryGrow(func: *CodeGen, inst: Air.Inst.Index) !void {
- const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
+fn airWasmMemoryGrow(cg: *CodeGen, inst: Air.Inst.Index) !void {
+ const pl_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
- const operand = try func.resolveInst(pl_op.operand);
- try func.emitWValue(operand);
- try func.addLabel(.memory_grow, pl_op.payload);
- return func.finishAir(inst, .stack, &.{pl_op.operand});
+ const operand = try cg.resolveInst(pl_op.operand);
+ try cg.emitWValue(operand);
+ try cg.addLabel(.memory_grow, pl_op.payload);
+ return cg.finishAir(inst, .stack, &.{pl_op.operand});
}
-fn cmpOptionals(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std.math.CompareOperator) InnerError!WValue {
- const pt = func.pt;
- const zcu = pt.zcu;
+fn cmpOptionals(cg: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std.math.CompareOperator) InnerError!WValue {
+ const zcu = cg.pt.zcu;
assert(operand_ty.hasRuntimeBitsIgnoreComptime(zcu));
assert(op == .eq or op == .neq);
const payload_ty = operand_ty.optionalChild(zcu);
// We store the final result in here that will be validated
// if the optional is truly equal.
- var result = try func.ensureAllocLocal(Type.i32);
- defer result.free(func);
-
- try func.startBlock(.block, wasm.block_empty);
- _ = try func.isNull(lhs, operand_ty, .i32_eq);
- _ = try func.isNull(rhs, operand_ty, .i32_eq);
- try func.addTag(.i32_ne); // inverse so we can exit early
- try func.addLabel(.br_if, 0);
-
- _ = try func.load(lhs, payload_ty, 0);
- _ = try func.load(rhs, payload_ty, 0);
- const opcode = buildOpcode(.{ .op = .ne, .valtype1 = typeToValtype(payload_ty, pt, func.target.*) });
- try func.addTag(Mir.Inst.Tag.fromOpcode(opcode));
- try func.addLabel(.br_if, 0);
-
- try func.addImm32(1);
- try func.addLabel(.local_set, result.local.value);
- try func.endBlock();
-
- try func.emitWValue(result);
- try func.addImm32(0);
- try func.addTag(if (op == .eq) .i32_ne else .i32_eq);
+ var result = try cg.ensureAllocLocal(Type.i32);
+ defer result.free(cg);
+
+ try cg.startBlock(.block, .empty);
+ _ = try cg.isNull(lhs, operand_ty, .i32_eq);
+ _ = try cg.isNull(rhs, operand_ty, .i32_eq);
+ try cg.addTag(.i32_ne); // inverse so we can exit early
+ try cg.addLabel(.br_if, 0);
+
+ _ = try cg.load(lhs, payload_ty, 0);
+ _ = try cg.load(rhs, payload_ty, 0);
+ const opcode = buildOpcode(.{ .op = .ne, .valtype1 = typeToValtype(payload_ty, zcu, cg.target) });
+ try cg.addTag(Mir.Inst.Tag.fromOpcode(opcode));
+ try cg.addLabel(.br_if, 0);
+
+ try cg.addImm32(1);
+ try cg.addLocal(.local_set, result.local.value);
+ try cg.endBlock();
+
+ try cg.emitWValue(result);
+ try cg.addImm32(0);
+ try cg.addTag(if (op == .eq) .i32_ne else .i32_eq);
return .stack;
}
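
// Illustrative sketch (not part of this patch): cmpOptionals above compares
// the two "is null" flags first and branches out of the block early when they
// differ; the payload comparison only matters when both optionals carry a
// value. A plain-Zig model of that strategy for a hypothetical ?u32 operand:
const std = @import("std");

fn optionalsEqual(lhs: ?u32, rhs: ?u32) bool {
    const lhs_null = lhs == null;
    const rhs_null = rhs == null;
    if (lhs_null != rhs_null) return false; // the early `br_if` out of the block
    if (lhs_null) return true; // both null: equal without looking at payloads
    return lhs.? == rhs.?; // both non-null: the payload comparison
}

test "optional equality checks the null bits before the payloads" {
    try std.testing.expect(optionalsEqual(null, null));
    try std.testing.expect(!optionalsEqual(null, 7));
    try std.testing.expect(optionalsEqual(7, 7));
    try std.testing.expect(!optionalsEqual(7, 8));
}
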
/// Compares big integers by checking both their high and low bits.
/// NOTE: Leaves the result of the comparison on top of the stack.
/// TODO: Lower this to compiler_rt call when bitsize > 128
-fn cmpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std.math.CompareOperator) InnerError!WValue {
- const pt = func.pt;
- const zcu = pt.zcu;
+fn cmpBigInt(cg: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std.math.CompareOperator) InnerError!WValue {
+ const zcu = cg.pt.zcu;
assert(operand_ty.abiSize(zcu) >= 16);
assert(!(lhs != .stack and rhs == .stack));
if (operand_ty.bitSize(zcu) > 128) {
- return func.fail("TODO: Support cmpBigInt for integer bitsize: '{d}'", .{operand_ty.bitSize(zcu)});
+ return cg.fail("TODO: Support cmpBigInt for integer bitsize: '{d}'", .{operand_ty.bitSize(zcu)});
}
- var lhs_msb = try (try func.load(lhs, Type.u64, 8)).toLocal(func, Type.u64);
- defer lhs_msb.free(func);
- var rhs_msb = try (try func.load(rhs, Type.u64, 8)).toLocal(func, Type.u64);
- defer rhs_msb.free(func);
+ var lhs_msb = try (try cg.load(lhs, Type.u64, 8)).toLocal(cg, Type.u64);
+ defer lhs_msb.free(cg);
+ var rhs_msb = try (try cg.load(rhs, Type.u64, 8)).toLocal(cg, Type.u64);
+ defer rhs_msb.free(cg);
switch (op) {
.eq, .neq => {
- const xor_high = try func.binOp(lhs_msb, rhs_msb, Type.u64, .xor);
- const lhs_lsb = try func.load(lhs, Type.u64, 0);
- const rhs_lsb = try func.load(rhs, Type.u64, 0);
- const xor_low = try func.binOp(lhs_lsb, rhs_lsb, Type.u64, .xor);
- const or_result = try func.binOp(xor_high, xor_low, Type.u64, .@"or");
+ const xor_high = try cg.binOp(lhs_msb, rhs_msb, Type.u64, .xor);
+ const lhs_lsb = try cg.load(lhs, Type.u64, 0);
+ const rhs_lsb = try cg.load(rhs, Type.u64, 0);
+ const xor_low = try cg.binOp(lhs_lsb, rhs_lsb, Type.u64, .xor);
+ const or_result = try cg.binOp(xor_high, xor_low, Type.u64, .@"or");
switch (op) {
- .eq => return func.cmp(or_result, .{ .imm64 = 0 }, Type.u64, .eq),
- .neq => return func.cmp(or_result, .{ .imm64 = 0 }, Type.u64, .neq),
+ .eq => return cg.cmp(or_result, .{ .imm64 = 0 }, Type.u64, .eq),
+ .neq => return cg.cmp(or_result, .{ .imm64 = 0 }, Type.u64, .neq),
else => unreachable,
}
},
else => {
const ty = if (operand_ty.isSignedInt(zcu)) Type.i64 else Type.u64;
            // leave those values on top of the stack for '.select'
- const lhs_lsb = try func.load(lhs, Type.u64, 0);
- const rhs_lsb = try func.load(rhs, Type.u64, 0);
- _ = try func.cmp(lhs_lsb, rhs_lsb, Type.u64, op);
- _ = try func.cmp(lhs_msb, rhs_msb, ty, op);
- _ = try func.cmp(lhs_msb, rhs_msb, ty, .eq);
- try func.addTag(.select);
+ const lhs_lsb = try cg.load(lhs, Type.u64, 0);
+ const rhs_lsb = try cg.load(rhs, Type.u64, 0);
+ _ = try cg.cmp(lhs_lsb, rhs_lsb, Type.u64, op);
+ _ = try cg.cmp(lhs_msb, rhs_msb, ty, op);
+ _ = try cg.cmp(lhs_msb, rhs_msb, ty, .eq);
+ try cg.addTag(.select);
},
}
return .stack;
}
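
// Illustrative sketch (not part of this patch): cmpBigInt above splits each
// 128-bit operand into two u64 halves. Equality folds both halves with
// xor/or and tests the result against zero; ordering compares the low halves
// and the high halves, then uses wasm `select`, keyed on whether the high
// halves are equal, to pick which comparison decides. A plain-Zig model:
const std = @import("std");

fn u128Equal(lhs: u128, rhs: u128) bool {
    const lhs_lo: u64 = @truncate(lhs);
    const lhs_hi: u64 = @truncate(lhs >> 64);
    const rhs_lo: u64 = @truncate(rhs);
    const rhs_hi: u64 = @truncate(rhs >> 64);
    return ((lhs_hi ^ rhs_hi) | (lhs_lo ^ rhs_lo)) == 0;
}

fn u128LessThan(lhs: u128, rhs: u128) bool {
    const lhs_lo: u64 = @truncate(lhs);
    const lhs_hi: u64 = @truncate(lhs >> 64);
    const rhs_lo: u64 = @truncate(rhs);
    const rhs_hi: u64 = @truncate(rhs >> 64);
    // the `select`: low-half result when the high halves tie, high-half result otherwise
    return if (lhs_hi == rhs_hi) lhs_lo < rhs_lo else lhs_hi < rhs_hi;
}

test "128-bit comparison via two 64-bit halves" {
    try std.testing.expect(u128Equal(42, 42));
    try std.testing.expect(!u128Equal(42, 42 + (@as(u128, 1) << 64)));
    try std.testing.expect(u128LessThan(1, @as(u128, 1) << 64));
    try std.testing.expect(!u128LessThan(@as(u128, 1) << 64, 1));
}
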
-fn airSetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const pt = func.pt;
+fn airSetUnionTag(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const pt = cg.pt;
const zcu = pt.zcu;
- const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
- const un_ty = func.typeOf(bin_op.lhs).childType(zcu);
- const tag_ty = func.typeOf(bin_op.rhs);
+ const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+ const un_ty = cg.typeOf(bin_op.lhs).childType(zcu);
+ const tag_ty = cg.typeOf(bin_op.rhs);
const layout = un_ty.unionGetLayout(zcu);
- if (layout.tag_size == 0) return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
+ if (layout.tag_size == 0) return cg.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
- const union_ptr = try func.resolveInst(bin_op.lhs);
- const new_tag = try func.resolveInst(bin_op.rhs);
+ const union_ptr = try cg.resolveInst(bin_op.lhs);
+ const new_tag = try cg.resolveInst(bin_op.rhs);
if (layout.payload_size == 0) {
- try func.store(union_ptr, new_tag, tag_ty, 0);
- return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
+ try cg.store(union_ptr, new_tag, tag_ty, 0);
+ return cg.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
}
// when the tag alignment is smaller than the payload, the field will be stored
@@ -5627,124 +5492,147 @@ fn airSetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const offset: u32 = if (layout.tag_align.compare(.lt, layout.payload_align)) blk: {
break :blk @intCast(layout.payload_size);
} else 0;
- try func.store(union_ptr, new_tag, tag_ty, offset);
- return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
+ try cg.store(union_ptr, new_tag, tag_ty, offset);
+ return cg.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
}
-fn airGetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const zcu = func.pt.zcu;
- const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+fn airGetUnionTag(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const zcu = cg.pt.zcu;
+ const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
- const un_ty = func.typeOf(ty_op.operand);
- const tag_ty = func.typeOfIndex(inst);
+ const un_ty = cg.typeOf(ty_op.operand);
+ const tag_ty = cg.typeOfIndex(inst);
const layout = un_ty.unionGetLayout(zcu);
- if (layout.tag_size == 0) return func.finishAir(inst, .none, &.{ty_op.operand});
+ if (layout.tag_size == 0) return cg.finishAir(inst, .none, &.{ty_op.operand});
- const operand = try func.resolveInst(ty_op.operand);
+ const operand = try cg.resolveInst(ty_op.operand);
// when the tag alignment is smaller than the payload, the field will be stored
// after the payload.
const offset: u32 = if (layout.tag_align.compare(.lt, layout.payload_align))
@intCast(layout.payload_size)
else
0;
- const result = try func.load(operand, tag_ty, offset);
- return func.finishAir(inst, result, &.{ty_op.operand});
+ const result = try cg.load(operand, tag_ty, offset);
+ return cg.finishAir(inst, result, &.{ty_op.operand});
}
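
// Illustrative sketch (not part of this patch): airSetUnionTag and
// airGetUnionTag above apply the same layout rule — the tag is stored after
// the payload when its alignment is smaller than the payload's alignment, and
// at offset 0 otherwise. A tiny, hypothetical helper spelling that rule out:
const std = @import("std");

fn unionTagOffset(tag_align: u64, payload_align: u64, payload_size: u64) u64 {
    return if (tag_align < payload_align) payload_size else 0;
}

test "the tag offset depends on the relative alignments" {
    try std.testing.expectEqual(@as(u64, 8), unionTagOffset(4, 8, 8)); // tag stored after the payload
    try std.testing.expectEqual(@as(u64, 0), unionTagOffset(8, 4, 8)); // tag stored first
}
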
-fn airFpext(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+fn airFpext(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
- const dest_ty = func.typeOfIndex(inst);
- const operand = try func.resolveInst(ty_op.operand);
- const result = try func.fpext(operand, func.typeOf(ty_op.operand), dest_ty);
- return func.finishAir(inst, result, &.{ty_op.operand});
+ const dest_ty = cg.typeOfIndex(inst);
+ const operand = try cg.resolveInst(ty_op.operand);
+ const result = try cg.fpext(operand, cg.typeOf(ty_op.operand), dest_ty);
+ return cg.finishAir(inst, result, &.{ty_op.operand});
}
-/// Extends a float from a given `Type` to a larger wanted `Type`
-/// NOTE: Leaves the result on the stack
-fn fpext(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError!WValue {
- const given_bits = given.floatBits(func.target.*);
- const wanted_bits = wanted.floatBits(func.target.*);
-
- if (wanted_bits == 64 and given_bits == 32) {
- try func.emitWValue(operand);
- try func.addTag(.f64_promote_f32);
- return .stack;
- } else if (given_bits == 16 and wanted_bits <= 64) {
- // call __extendhfsf2(f16) f32
- const f32_result = try func.callIntrinsic(
- "__extendhfsf2",
- &.{.f16_type},
- Type.f32,
- &.{operand},
- );
- std.debug.assert(f32_result == .stack);
-
- if (wanted_bits == 64) {
- try func.addTag(.f64_promote_f32);
- }
- return .stack;
- }
-
- var fn_name_buf: [13]u8 = undefined;
- const fn_name = std.fmt.bufPrint(&fn_name_buf, "__extend{s}f{s}f2", .{
- target_util.compilerRtFloatAbbrev(given_bits),
- target_util.compilerRtFloatAbbrev(wanted_bits),
- }) catch unreachable;
+/// Extends a float from a given `Type` to a larger wanted `Type`, leaving the
+/// result on the stack.
+fn fpext(cg: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError!WValue {
+ const given_bits = given.floatBits(cg.target.*);
+ const wanted_bits = wanted.floatBits(cg.target.*);
- return func.callIntrinsic(fn_name, &.{given.ip_index}, wanted, &.{operand});
+ const intrinsic: Mir.Intrinsic = switch (given_bits) {
+ 16 => switch (wanted_bits) {
+ 32 => {
+ assert(.stack == try cg.callIntrinsic(.__extendhfsf2, &.{.f16_type}, Type.f32, &.{operand}));
+ return .stack;
+ },
+ 64 => {
+ assert(.stack == try cg.callIntrinsic(.__extendhfsf2, &.{.f16_type}, Type.f32, &.{operand}));
+ try cg.addTag(.f64_promote_f32);
+ return .stack;
+ },
+ 80 => .__extendhfxf2,
+ 128 => .__extendhftf2,
+ else => unreachable,
+ },
+ 32 => switch (wanted_bits) {
+ 64 => {
+ try cg.emitWValue(operand);
+ try cg.addTag(.f64_promote_f32);
+ return .stack;
+ },
+ 80 => .__extendsfxf2,
+ 128 => .__extendsftf2,
+ else => unreachable,
+ },
+ 64 => switch (wanted_bits) {
+ 80 => .__extenddfxf2,
+ 128 => .__extenddftf2,
+ else => unreachable,
+ },
+ 80 => switch (wanted_bits) {
+ 128 => .__extendxftf2,
+ else => unreachable,
+ },
+ else => unreachable,
+ };
+ return cg.callIntrinsic(intrinsic, &.{given.ip_index}, wanted, &.{operand});
}
-fn airFptrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+fn airFptrunc(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
- const dest_ty = func.typeOfIndex(inst);
- const operand = try func.resolveInst(ty_op.operand);
- const result = try func.fptrunc(operand, func.typeOf(ty_op.operand), dest_ty);
- return func.finishAir(inst, result, &.{ty_op.operand});
+ const dest_ty = cg.typeOfIndex(inst);
+ const operand = try cg.resolveInst(ty_op.operand);
+ const result = try cg.fptrunc(operand, cg.typeOf(ty_op.operand), dest_ty);
+ return cg.finishAir(inst, result, &.{ty_op.operand});
}
-/// Truncates a float from a given `Type` to its wanted `Type`
-/// NOTE: The result value remains on the stack
-fn fptrunc(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError!WValue {
- const given_bits = given.floatBits(func.target.*);
- const wanted_bits = wanted.floatBits(func.target.*);
+/// Truncates a float from a given `Type` to its wanted `Type`, leaving the
+/// result on the stack.
+fn fptrunc(cg: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError!WValue {
+ const given_bits = given.floatBits(cg.target.*);
+ const wanted_bits = wanted.floatBits(cg.target.*);
- if (wanted_bits == 32 and given_bits == 64) {
- try func.emitWValue(operand);
- try func.addTag(.f32_demote_f64);
- return .stack;
- } else if (wanted_bits == 16 and given_bits <= 64) {
- const op: WValue = if (given_bits == 64) blk: {
- try func.emitWValue(operand);
- try func.addTag(.f32_demote_f64);
- break :blk .stack;
- } else operand;
-
- // call __truncsfhf2(f32) f16
- return func.callIntrinsic("__truncsfhf2", &.{.f32_type}, Type.f16, &.{op});
- }
-
- var fn_name_buf: [12]u8 = undefined;
- const fn_name = std.fmt.bufPrint(&fn_name_buf, "__trunc{s}f{s}f2", .{
- target_util.compilerRtFloatAbbrev(given_bits),
- target_util.compilerRtFloatAbbrev(wanted_bits),
- }) catch unreachable;
-
- return func.callIntrinsic(fn_name, &.{given.ip_index}, wanted, &.{operand});
+ const intrinsic: Mir.Intrinsic = switch (given_bits) {
+ 32 => switch (wanted_bits) {
+ 16 => {
+ return cg.callIntrinsic(.__truncsfhf2, &.{.f32_type}, Type.f16, &.{operand});
+ },
+ else => unreachable,
+ },
+ 64 => switch (wanted_bits) {
+ 16 => {
+ try cg.emitWValue(operand);
+ try cg.addTag(.f32_demote_f64);
+ return cg.callIntrinsic(.__truncsfhf2, &.{.f32_type}, Type.f16, &.{.stack});
+ },
+ 32 => {
+ try cg.emitWValue(operand);
+ try cg.addTag(.f32_demote_f64);
+ return .stack;
+ },
+ else => unreachable,
+ },
+ 80 => switch (wanted_bits) {
+ 16 => .__truncxfhf2,
+ 32 => .__truncxfsf2,
+ 64 => .__truncxfdf2,
+ else => unreachable,
+ },
+ 128 => switch (wanted_bits) {
+ 16 => .__trunctfhf2,
+ 32 => .__trunctfsf2,
+ 64 => .__trunctfdf2,
+ 80 => .__trunctfxf2,
+ else => unreachable,
+ },
+ else => unreachable,
+ };
+ return cg.callIntrinsic(intrinsic, &.{given.ip_index}, wanted, &.{operand});
}
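
// Illustrative sketch (not part of this patch): the removed code built
// compiler-rt routine names such as "__extendhftf2" at runtime from the
// source and destination float widths; the new code reaches the same routines
// through an exhaustive switch over Mir.Intrinsic. A model of the naming
// scheme, assuming the usual compiler-rt width abbreviations
// (h/s/d/x/t for f16/f32/f64/f80/f128):
const std = @import("std");

fn floatAbbrev(bits: u16) []const u8 {
    return switch (bits) {
        16 => "h",
        32 => "s",
        64 => "d",
        80 => "x",
        128 => "t",
        else => unreachable,
    };
}

test "extend/trunc intrinsic names follow the width abbreviations" {
    var buf: [16]u8 = undefined;
    const extend = try std.fmt.bufPrint(&buf, "__extend{s}f{s}f2", .{ floatAbbrev(16), floatAbbrev(128) });
    try std.testing.expectEqualStrings("__extendhftf2", extend);
    const trunc = try std.fmt.bufPrint(&buf, "__trunc{s}f{s}f2", .{ floatAbbrev(128), floatAbbrev(16) });
    try std.testing.expectEqualStrings("__trunctfhf2", trunc);
}
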
-fn airErrUnionPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const pt = func.pt;
- const zcu = pt.zcu;
- const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+fn airErrUnionPayloadPtrSet(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const zcu = cg.pt.zcu;
+ const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
- const err_set_ty = func.typeOf(ty_op.operand).childType(zcu);
+ const err_set_ty = cg.typeOf(ty_op.operand).childType(zcu);
const payload_ty = err_set_ty.errorUnionPayload(zcu);
- const operand = try func.resolveInst(ty_op.operand);
+ const operand = try cg.resolveInst(ty_op.operand);
    // set the error tag to '0' to mark the error union as non-error
- try func.store(
+ try cg.store(
operand,
.{ .imm32 = 0 },
Type.anyerror,
@@ -5753,63 +5641,60 @@ fn airErrUnionPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!voi
const result = result: {
if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
- break :result func.reuseOperand(ty_op.operand, operand);
+ break :result cg.reuseOperand(ty_op.operand, operand);
}
- break :result try func.buildPointerOffset(operand, @as(u32, @intCast(errUnionPayloadOffset(payload_ty, zcu))), .new);
+ break :result try cg.buildPointerOffset(operand, @as(u32, @intCast(errUnionPayloadOffset(payload_ty, zcu))), .new);
};
- return func.finishAir(inst, result, &.{ty_op.operand});
+ return cg.finishAir(inst, result, &.{ty_op.operand});
}
-fn airFieldParentPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const pt = func.pt;
- const zcu = pt.zcu;
- const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
- const extra = func.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
+fn airFieldParentPtr(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const zcu = cg.pt.zcu;
+ const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
+ const extra = cg.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
- const field_ptr = try func.resolveInst(extra.field_ptr);
+ const field_ptr = try cg.resolveInst(extra.field_ptr);
const parent_ty = ty_pl.ty.toType().childType(zcu);
const field_offset = parent_ty.structFieldOffset(extra.field_index, zcu);
const result = if (field_offset != 0) result: {
- const base = try func.buildPointerOffset(field_ptr, 0, .new);
- try func.addLabel(.local_get, base.local.value);
- try func.addImm32(@intCast(field_offset));
- try func.addTag(.i32_sub);
- try func.addLabel(.local_set, base.local.value);
+ const base = try cg.buildPointerOffset(field_ptr, 0, .new);
+ try cg.addLocal(.local_get, base.local.value);
+ try cg.addImm32(@intCast(field_offset));
+ try cg.addTag(.i32_sub);
+ try cg.addLocal(.local_set, base.local.value);
break :result base;
- } else func.reuseOperand(extra.field_ptr, field_ptr);
+ } else cg.reuseOperand(extra.field_ptr, field_ptr);
- return func.finishAir(inst, result, &.{extra.field_ptr});
+ return cg.finishAir(inst, result, &.{extra.field_ptr});
}
-fn sliceOrArrayPtr(func: *CodeGen, ptr: WValue, ptr_ty: Type) InnerError!WValue {
- const pt = func.pt;
- const zcu = pt.zcu;
+fn sliceOrArrayPtr(cg: *CodeGen, ptr: WValue, ptr_ty: Type) InnerError!WValue {
+ const zcu = cg.pt.zcu;
if (ptr_ty.isSlice(zcu)) {
- return func.slicePtr(ptr);
+ return cg.slicePtr(ptr);
} else {
return ptr;
}
}
-fn airMemcpy(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const pt = func.pt;
- const zcu = pt.zcu;
- const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
- const dst = try func.resolveInst(bin_op.lhs);
- const dst_ty = func.typeOf(bin_op.lhs);
+fn airMemcpy(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const zcu = cg.pt.zcu;
+ const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+ const dst = try cg.resolveInst(bin_op.lhs);
+ const dst_ty = cg.typeOf(bin_op.lhs);
const ptr_elem_ty = dst_ty.childType(zcu);
- const src = try func.resolveInst(bin_op.rhs);
- const src_ty = func.typeOf(bin_op.rhs);
+ const src = try cg.resolveInst(bin_op.rhs);
+ const src_ty = cg.typeOf(bin_op.rhs);
const len = switch (dst_ty.ptrSize(zcu)) {
.Slice => blk: {
- const slice_len = try func.sliceLen(dst);
+ const slice_len = try cg.sliceLen(dst);
if (ptr_elem_ty.abiSize(zcu) != 1) {
- try func.emitWValue(slice_len);
- try func.emitWValue(.{ .imm32 = @as(u32, @intCast(ptr_elem_ty.abiSize(zcu))) });
- try func.addTag(.i32_mul);
- try func.addLabel(.local_set, slice_len.local.value);
+ try cg.emitWValue(slice_len);
+ try cg.emitWValue(.{ .imm32 = @as(u32, @intCast(ptr_elem_ty.abiSize(zcu))) });
+ try cg.addTag(.i32_mul);
+ try cg.addLocal(.local_set, slice_len.local.value);
}
break :blk slice_len;
},
@@ -5818,96 +5703,94 @@ fn airMemcpy(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}),
.C, .Many => unreachable,
};
- const dst_ptr = try func.sliceOrArrayPtr(dst, dst_ty);
- const src_ptr = try func.sliceOrArrayPtr(src, src_ty);
- try func.memcpy(dst_ptr, src_ptr, len);
+ const dst_ptr = try cg.sliceOrArrayPtr(dst, dst_ty);
+ const src_ptr = try cg.sliceOrArrayPtr(src, src_ty);
+ try cg.memcpy(dst_ptr, src_ptr, len);
- return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
+ return cg.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
}
-fn airRetAddr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+fn airRetAddr(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// TODO: Implement this properly once stack serialization is solved
- return func.finishAir(inst, switch (func.arch()) {
+ return cg.finishAir(inst, switch (cg.ptr_size) {
.wasm32 => .{ .imm32 = 0 },
.wasm64 => .{ .imm64 = 0 },
- else => unreachable,
}, &.{});
}
-fn airPopcount(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const pt = func.pt;
+fn airPopcount(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const pt = cg.pt;
const zcu = pt.zcu;
- const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+ const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
- const operand = try func.resolveInst(ty_op.operand);
- const op_ty = func.typeOf(ty_op.operand);
+ const operand = try cg.resolveInst(ty_op.operand);
+ const op_ty = cg.typeOf(ty_op.operand);
if (op_ty.zigTypeTag(zcu) == .vector) {
- return func.fail("TODO: Implement @popCount for vectors", .{});
+ return cg.fail("TODO: Implement @popCount for vectors", .{});
}
const int_info = op_ty.intInfo(zcu);
const bits = int_info.bits;
const wasm_bits = toWasmBits(bits) orelse {
- return func.fail("TODO: Implement @popCount for integers with bitsize '{d}'", .{bits});
+ return cg.fail("TODO: Implement @popCount for integers with bitsize '{d}'", .{bits});
};
switch (wasm_bits) {
32 => {
- try func.emitWValue(operand);
+ try cg.emitWValue(operand);
if (op_ty.isSignedInt(zcu) and bits != wasm_bits) {
- _ = try func.wrapOperand(.stack, try pt.intType(.unsigned, bits));
+ _ = try cg.wrapOperand(.stack, try pt.intType(.unsigned, bits));
}
- try func.addTag(.i32_popcnt);
+ try cg.addTag(.i32_popcnt);
},
64 => {
- try func.emitWValue(operand);
+ try cg.emitWValue(operand);
if (op_ty.isSignedInt(zcu) and bits != wasm_bits) {
- _ = try func.wrapOperand(.stack, try pt.intType(.unsigned, bits));
+ _ = try cg.wrapOperand(.stack, try pt.intType(.unsigned, bits));
}
- try func.addTag(.i64_popcnt);
- try func.addTag(.i32_wrap_i64);
- try func.emitWValue(operand);
+ try cg.addTag(.i64_popcnt);
+ try cg.addTag(.i32_wrap_i64);
+ try cg.emitWValue(operand);
},
128 => {
- _ = try func.load(operand, Type.u64, 0);
- try func.addTag(.i64_popcnt);
- _ = try func.load(operand, Type.u64, 8);
+ _ = try cg.load(operand, Type.u64, 0);
+ try cg.addTag(.i64_popcnt);
+ _ = try cg.load(operand, Type.u64, 8);
if (op_ty.isSignedInt(zcu) and bits != wasm_bits) {
- _ = try func.wrapOperand(.stack, try pt.intType(.unsigned, bits - 64));
+ _ = try cg.wrapOperand(.stack, try pt.intType(.unsigned, bits - 64));
}
- try func.addTag(.i64_popcnt);
- try func.addTag(.i64_add);
- try func.addTag(.i32_wrap_i64);
+ try cg.addTag(.i64_popcnt);
+ try cg.addTag(.i64_add);
+ try cg.addTag(.i32_wrap_i64);
},
else => unreachable,
}
- return func.finishAir(inst, .stack, &.{ty_op.operand});
+ return cg.finishAir(inst, .stack, &.{ty_op.operand});
}
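
// Illustrative sketch (not part of this patch): the 128-bit case of
// airPopcount above runs i64.popcnt on each 64-bit half and adds the two
// counts. A plain-Zig model:
const std = @import("std");

fn popCount128(x: u128) u32 {
    const lo: u64 = @truncate(x);
    const hi: u64 = @truncate(x >> 64);
    return @as(u32, @popCount(lo)) + @popCount(hi);
}

test "128-bit popcount as the sum of two 64-bit popcounts" {
    try std.testing.expectEqual(@as(u32, 0), popCount128(0));
    try std.testing.expectEqual(@as(u32, 3), popCount128((@as(u128, 1) << 127) | 0b11));
    try std.testing.expectEqual(@as(u32, 128), popCount128(std.math.maxInt(u128)));
}
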
-fn airBitReverse(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const pt = func.pt;
- const zcu = pt.zcu;
- const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+fn airBitReverse(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const zcu = cg.pt.zcu;
+ const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
- const operand = try func.resolveInst(ty_op.operand);
- const ty = func.typeOf(ty_op.operand);
+ const operand = try cg.resolveInst(ty_op.operand);
+ const ty = cg.typeOf(ty_op.operand);
if (ty.zigTypeTag(zcu) == .vector) {
- return func.fail("TODO: Implement @bitReverse for vectors", .{});
+ return cg.fail("TODO: Implement @bitReverse for vectors", .{});
}
const int_info = ty.intInfo(zcu);
const bits = int_info.bits;
const wasm_bits = toWasmBits(bits) orelse {
- return func.fail("TODO: Implement @bitReverse for integers with bitsize '{d}'", .{bits});
+ return cg.fail("TODO: Implement @bitReverse for integers with bitsize '{d}'", .{bits});
};
switch (wasm_bits) {
32 => {
- const intrin_ret = try func.callIntrinsic(
- "__bitreversesi2",
+ const intrin_ret = try cg.callIntrinsic(
+ .__bitreversesi2,
&.{.u32_type},
Type.u32,
&.{operand},
@@ -5915,12 +5798,12 @@ fn airBitReverse(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const result = if (bits == 32)
intrin_ret
else
- try func.binOp(intrin_ret, .{ .imm32 = 32 - bits }, ty, .shr);
- return func.finishAir(inst, result, &.{ty_op.operand});
+ try cg.binOp(intrin_ret, .{ .imm32 = 32 - bits }, ty, .shr);
+ return cg.finishAir(inst, result, &.{ty_op.operand});
},
64 => {
- const intrin_ret = try func.callIntrinsic(
- "__bitreversedi2",
+ const intrin_ret = try cg.callIntrinsic(
+ .__bitreversedi2,
&.{.u64_type},
Type.u64,
&.{operand},
@@ -5928,68 +5811,63 @@ fn airBitReverse(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const result = if (bits == 64)
intrin_ret
else
- try func.binOp(intrin_ret, .{ .imm64 = 64 - bits }, ty, .shr);
- return func.finishAir(inst, result, &.{ty_op.operand});
+ try cg.binOp(intrin_ret, .{ .imm64 = 64 - bits }, ty, .shr);
+ return cg.finishAir(inst, result, &.{ty_op.operand});
},
128 => {
- const result = try func.allocStack(ty);
+ const result = try cg.allocStack(ty);
- try func.emitWValue(result);
- const first_half = try func.load(operand, Type.u64, 8);
- const intrin_ret_first = try func.callIntrinsic(
- "__bitreversedi2",
+ try cg.emitWValue(result);
+ const first_half = try cg.load(operand, Type.u64, 8);
+ const intrin_ret_first = try cg.callIntrinsic(
+ .__bitreversedi2,
&.{.u64_type},
Type.u64,
&.{first_half},
);
- try func.emitWValue(intrin_ret_first);
+ try cg.emitWValue(intrin_ret_first);
if (bits < 128) {
- try func.emitWValue(.{ .imm64 = 128 - bits });
- try func.addTag(.i64_shr_u);
+ try cg.emitWValue(.{ .imm64 = 128 - bits });
+ try cg.addTag(.i64_shr_u);
}
- try func.emitWValue(result);
- const second_half = try func.load(operand, Type.u64, 0);
- const intrin_ret_second = try func.callIntrinsic(
- "__bitreversedi2",
+ try cg.emitWValue(result);
+ const second_half = try cg.load(operand, Type.u64, 0);
+ const intrin_ret_second = try cg.callIntrinsic(
+ .__bitreversedi2,
&.{.u64_type},
Type.u64,
&.{second_half},
);
- try func.emitWValue(intrin_ret_second);
+ try cg.emitWValue(intrin_ret_second);
if (bits == 128) {
- try func.store(.stack, .stack, Type.u64, result.offset() + 8);
- try func.store(.stack, .stack, Type.u64, result.offset());
+ try cg.store(.stack, .stack, Type.u64, result.offset() + 8);
+ try cg.store(.stack, .stack, Type.u64, result.offset());
} else {
- var tmp = try func.allocLocal(Type.u64);
- defer tmp.free(func);
- try func.addLabel(.local_tee, tmp.local.value);
- try func.emitWValue(.{ .imm64 = 128 - bits });
+ var tmp = try cg.allocLocal(Type.u64);
+ defer tmp.free(cg);
+ try cg.addLocal(.local_tee, tmp.local.value);
+ try cg.emitWValue(.{ .imm64 = 128 - bits });
if (ty.isSignedInt(zcu)) {
- try func.addTag(.i64_shr_s);
+ try cg.addTag(.i64_shr_s);
} else {
- try func.addTag(.i64_shr_u);
+ try cg.addTag(.i64_shr_u);
}
- try func.store(.stack, .stack, Type.u64, result.offset() + 8);
- try func.addLabel(.local_get, tmp.local.value);
- try func.emitWValue(.{ .imm64 = bits - 64 });
- try func.addTag(.i64_shl);
- try func.addTag(.i64_or);
- try func.store(.stack, .stack, Type.u64, result.offset());
+ try cg.store(.stack, .stack, Type.u64, result.offset() + 8);
+ try cg.addLocal(.local_get, tmp.local.value);
+ try cg.emitWValue(.{ .imm64 = bits - 64 });
+ try cg.addTag(.i64_shl);
+ try cg.addTag(.i64_or);
+ try cg.store(.stack, .stack, Type.u64, result.offset());
}
- return func.finishAir(inst, result, &.{ty_op.operand});
+ return cg.finishAir(inst, result, &.{ty_op.operand});
},
else => unreachable,
}
}
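
// Illustrative sketch (not part of this patch): the 128-bit case of
// airBitReverse above reverses each 64-bit half with __bitreversedi2, swaps
// the halves, and for widths below 128 shifts the combined value right by
// (128 - bits). A plain-Zig model of the full 128-bit width:
const std = @import("std");

fn bitReverse128(x: u128) u128 {
    const lo: u64 = @truncate(x);
    const hi: u64 = @truncate(x >> 64);
    const rev_lo: u128 = @bitReverse(lo); // reversed low half becomes the new high half
    const rev_hi: u128 = @bitReverse(hi); // reversed high half becomes the new low half
    return (rev_lo << 64) | rev_hi;
}

test "128-bit bit reverse from two reversed halves" {
    const x: u128 = 0x0123_4567_89ab_cdef_0000_0000_0000_0001;
    try std.testing.expectEqual(@bitReverse(x), bitReverse128(x));
}
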
-fn airErrorName(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
-
- const operand = try func.resolveInst(un_op);
- // First retrieve the symbol index to the error name table
- // that will be used to emit a relocation for the pointer
- // to the error name table.
- //
+fn airErrorName(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const un_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
+ const operand = try cg.resolveInst(un_op);
    // Each entry in this table is a slice (ptr+len).
// The operand in this instruction represents the index within this table.
// This means to get the final name, we emit the base pointer and then perform
@@ -5997,82 +5875,82 @@ fn airErrorName(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
//
// As the names are global and the slice elements are constant, we do not have
// to make a copy of the ptr+value but can point towards them directly.
- const pt = func.pt;
- const error_table_symbol = try func.bin_file.getErrorTableSymbol(pt);
+ const pt = cg.pt;
const name_ty = Type.slice_const_u8_sentinel_0;
const abi_size = name_ty.abiSize(pt.zcu);
- const error_name_value: WValue = .{ .memory = error_table_symbol }; // emitting this will create a relocation
- try func.emitWValue(error_name_value);
- try func.emitWValue(operand);
- switch (func.arch()) {
+ cg.wasm.error_name_table_ref_count += 1;
+
+    // Lowers to an i32.const or i64.const with the error table memory address.
+ try cg.addTag(.error_name_table_ref);
+ try cg.emitWValue(operand);
+ switch (cg.ptr_size) {
.wasm32 => {
- try func.addImm32(@intCast(abi_size));
- try func.addTag(.i32_mul);
- try func.addTag(.i32_add);
+ try cg.addImm32(@intCast(abi_size));
+ try cg.addTag(.i32_mul);
+ try cg.addTag(.i32_add);
},
.wasm64 => {
- try func.addImm64(abi_size);
- try func.addTag(.i64_mul);
- try func.addTag(.i64_add);
+ try cg.addImm64(abi_size);
+ try cg.addTag(.i64_mul);
+ try cg.addTag(.i64_add);
},
- else => unreachable,
}
- return func.finishAir(inst, .stack, &.{un_op});
+ return cg.finishAir(inst, .stack, &.{un_op});
}
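
// Illustrative sketch (not part of this patch): airErrorName above lowers
// @errorName to pointer arithmetic over a table of (ptr, len) slices — the
// table base address (now produced by the new error_name_table_ref
// instruction) plus the error value times the slice's ABI size. A model with
// hypothetical wasm32 numbers, where a slice is 8 bytes:
const std = @import("std");

fn errorNameEntryAddress(table_base: u32, error_value: u32) u32 {
    const slice_abi_size: u32 = 8; // assumption: 4-byte ptr + 4-byte len on wasm32
    return table_base + error_value * slice_abi_size; // i32.mul then i32.add
}

test "error name lookup is base + index * slice size" {
    try std.testing.expectEqual(@as(u32, 0x1000), errorNameEntryAddress(0x1000, 0));
    try std.testing.expectEqual(@as(u32, 0x1018), errorNameEntryAddress(0x1000, 3));
}
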
-fn airPtrSliceFieldPtr(func: *CodeGen, inst: Air.Inst.Index, offset: u32) InnerError!void {
- const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
- const slice_ptr = try func.resolveInst(ty_op.operand);
- const result = try func.buildPointerOffset(slice_ptr, offset, .new);
- return func.finishAir(inst, result, &.{ty_op.operand});
+fn airPtrSliceFieldPtr(cg: *CodeGen, inst: Air.Inst.Index, offset: u32) InnerError!void {
+ const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+ const slice_ptr = try cg.resolveInst(ty_op.operand);
+ const result = try cg.buildPointerOffset(slice_ptr, offset, .new);
+ return cg.finishAir(inst, result, &.{ty_op.operand});
}
/// NOTE: Allocates space for the result on the virtual stack when the integer size > 64 bits
-fn intZeroValue(func: *CodeGen, ty: Type) InnerError!WValue {
- const zcu = func.bin_file.base.comp.zcu.?;
+fn intZeroValue(cg: *CodeGen, ty: Type) InnerError!WValue {
+ const zcu = cg.wasm.base.comp.zcu.?;
const int_info = ty.intInfo(zcu);
const wasm_bits = toWasmBits(int_info.bits) orelse {
- return func.fail("TODO: Implement intZeroValue for integer bitsize: {d}", .{int_info.bits});
+ return cg.fail("TODO: Implement intZeroValue for integer bitsize: {d}", .{int_info.bits});
};
switch (wasm_bits) {
32 => return .{ .imm32 = 0 },
64 => return .{ .imm64 = 0 },
128 => {
- const result = try func.allocStack(ty);
- try func.store(result, .{ .imm64 = 0 }, Type.u64, 0);
- try func.store(result, .{ .imm64 = 0 }, Type.u64, 8);
+ const result = try cg.allocStack(ty);
+ try cg.store(result, .{ .imm64 = 0 }, Type.u64, 0);
+ try cg.store(result, .{ .imm64 = 0 }, Type.u64, 8);
return result;
},
else => unreachable,
}
}
-fn airAddSubWithOverflow(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
+fn airAddSubWithOverflow(cg: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
assert(op == .add or op == .sub);
- const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
- const extra = func.air.extraData(Air.Bin, ty_pl.payload).data;
+ const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
+ const extra = cg.air.extraData(Air.Bin, ty_pl.payload).data;
- const lhs = try func.resolveInst(extra.lhs);
- const rhs = try func.resolveInst(extra.rhs);
- const ty = func.typeOf(extra.lhs);
- const pt = func.pt;
+ const lhs = try cg.resolveInst(extra.lhs);
+ const rhs = try cg.resolveInst(extra.rhs);
+ const ty = cg.typeOf(extra.lhs);
+ const pt = cg.pt;
const zcu = pt.zcu;
if (ty.zigTypeTag(zcu) == .vector) {
- return func.fail("TODO: Implement overflow arithmetic for vectors", .{});
+ return cg.fail("TODO: Implement overflow arithmetic for vectors", .{});
}
const int_info = ty.intInfo(zcu);
const is_signed = int_info.signedness == .signed;
if (int_info.bits > 128) {
- return func.fail("TODO: Implement {{add/sub}}_with_overflow for integer bitsize: {d}", .{int_info.bits});
+ return cg.fail("TODO: Implement {{add/sub}}_with_overflow for integer bitsize: {d}", .{int_info.bits});
}
- const op_result = try func.wrapBinOp(lhs, rhs, ty, op);
- var op_tmp = try op_result.toLocal(func, ty);
- defer op_tmp.free(func);
+ const op_result = try cg.wrapBinOp(lhs, rhs, ty, op);
+ var op_tmp = try op_result.toLocal(cg, ty);
+ defer op_tmp.free(cg);
const cmp_op: std.math.CompareOperator = switch (op) {
.add => .lt,
@@ -6080,40 +5958,40 @@ fn airAddSubWithOverflow(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerErro
else => unreachable,
};
const overflow_bit = if (is_signed) blk: {
- const zero = try intZeroValue(func, ty);
- const rhs_is_neg = try func.cmp(rhs, zero, ty, .lt);
- const overflow_cmp = try func.cmp(op_tmp, lhs, ty, cmp_op);
- break :blk try func.cmp(rhs_is_neg, overflow_cmp, Type.u1, .neq);
- } else try func.cmp(op_tmp, lhs, ty, cmp_op);
- var bit_tmp = try overflow_bit.toLocal(func, Type.u1);
- defer bit_tmp.free(func);
-
- const result = try func.allocStack(func.typeOfIndex(inst));
+ const zero = try intZeroValue(cg, ty);
+ const rhs_is_neg = try cg.cmp(rhs, zero, ty, .lt);
+ const overflow_cmp = try cg.cmp(op_tmp, lhs, ty, cmp_op);
+ break :blk try cg.cmp(rhs_is_neg, overflow_cmp, Type.u1, .neq);
+ } else try cg.cmp(op_tmp, lhs, ty, cmp_op);
+ var bit_tmp = try overflow_bit.toLocal(cg, Type.u1);
+ defer bit_tmp.free(cg);
+
+ const result = try cg.allocStack(cg.typeOfIndex(inst));
const offset: u32 = @intCast(ty.abiSize(zcu));
- try func.store(result, op_tmp, ty, 0);
- try func.store(result, bit_tmp, Type.u1, offset);
+ try cg.store(result, op_tmp, ty, 0);
+ try cg.store(result, bit_tmp, Type.u1, offset);
- return func.finishAir(inst, result, &.{ extra.lhs, extra.rhs });
+ return cg.finishAir(inst, result, &.{ extra.lhs, extra.rhs });
}
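
// Illustrative sketch (not part of this patch): for unsigned operands the
// code above detects overflow by comparing the wrapped result against the
// left operand (result < lhs for add, with the mirrored check for sub); for
// signed operands that comparison is additionally XOR'd with the sign of rhs.
// A plain-Zig model of the unsigned add case:
const std = @import("std");

fn addWithOverflowU32(lhs: u32, rhs: u32) struct { result: u32, overflow: bool } {
    const result = lhs +% rhs; // the wrapped binary op
    return .{ .result = result, .overflow = result < lhs };
}

test "unsigned add overflow falls out of a single comparison" {
    try std.testing.expect(!addWithOverflowU32(1, 2).overflow);
    try std.testing.expect(addWithOverflowU32(std.math.maxInt(u32), 1).overflow);
    try std.testing.expectEqual(@as(u32, 0), addWithOverflowU32(std.math.maxInt(u32), 1).result);
}
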
-fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const pt = func.pt;
+fn airShlWithOverflow(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const pt = cg.pt;
const zcu = pt.zcu;
- const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
- const extra = func.air.extraData(Air.Bin, ty_pl.payload).data;
+ const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
+ const extra = cg.air.extraData(Air.Bin, ty_pl.payload).data;
- const lhs = try func.resolveInst(extra.lhs);
- const rhs = try func.resolveInst(extra.rhs);
- const ty = func.typeOf(extra.lhs);
- const rhs_ty = func.typeOf(extra.rhs);
+ const lhs = try cg.resolveInst(extra.lhs);
+ const rhs = try cg.resolveInst(extra.rhs);
+ const ty = cg.typeOf(extra.lhs);
+ const rhs_ty = cg.typeOf(extra.rhs);
if (ty.zigTypeTag(zcu) == .vector) {
- return func.fail("TODO: Implement overflow arithmetic for vectors", .{});
+ return cg.fail("TODO: Implement overflow arithmetic for vectors", .{});
}
const int_info = ty.intInfo(zcu);
const wasm_bits = toWasmBits(int_info.bits) orelse {
- return func.fail("TODO: Implement shl_with_overflow for integer bitsize: {d}", .{int_info.bits});
+ return cg.fail("TODO: Implement shl_with_overflow for integer bitsize: {d}", .{int_info.bits});
};
    // Ensure rhs is coerced to lhs's type, as both operands must have the same WebAssembly type
@@ -6121,50 +5999,50 @@ fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const rhs_wasm_bits = toWasmBits(rhs_ty.intInfo(zcu).bits).?;
// If wasm_bits == 128, compiler-rt expects i32 for shift
const rhs_final = if (wasm_bits != rhs_wasm_bits and wasm_bits == 64) blk: {
- const rhs_casted = try func.intcast(rhs, rhs_ty, ty);
- break :blk try rhs_casted.toLocal(func, ty);
+ const rhs_casted = try cg.intcast(rhs, rhs_ty, ty);
+ break :blk try rhs_casted.toLocal(cg, ty);
} else rhs;
- var shl = try (try func.wrapBinOp(lhs, rhs_final, ty, .shl)).toLocal(func, ty);
- defer shl.free(func);
+ var shl = try (try cg.wrapBinOp(lhs, rhs_final, ty, .shl)).toLocal(cg, ty);
+ defer shl.free(cg);
const overflow_bit = blk: {
- const shr = try func.binOp(shl, rhs_final, ty, .shr);
- break :blk try func.cmp(shr, lhs, ty, .neq);
+ const shr = try cg.binOp(shl, rhs_final, ty, .shr);
+ break :blk try cg.cmp(shr, lhs, ty, .neq);
};
- var overflow_local = try overflow_bit.toLocal(func, Type.u1);
- defer overflow_local.free(func);
+ var overflow_local = try overflow_bit.toLocal(cg, Type.u1);
+ defer overflow_local.free(cg);
- const result = try func.allocStack(func.typeOfIndex(inst));
+ const result = try cg.allocStack(cg.typeOfIndex(inst));
const offset: u32 = @intCast(ty.abiSize(zcu));
- try func.store(result, shl, ty, 0);
- try func.store(result, overflow_local, Type.u1, offset);
+ try cg.store(result, shl, ty, 0);
+ try cg.store(result, overflow_local, Type.u1, offset);
- return func.finishAir(inst, result, &.{ extra.lhs, extra.rhs });
+ return cg.finishAir(inst, result, &.{ extra.lhs, extra.rhs });
}
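
// Illustrative sketch (not part of this patch): the overflow bit above is
// computed by shifting the (wrapped) shift-left result back to the right and
// checking whether it still equals the original operand; any difference means
// bits were shifted out. A plain-Zig model for u32:
const std = @import("std");

fn shlWithOverflowU32(lhs: u32, shift: u5) struct { result: u32, overflow: bool } {
    const shifted = lhs << shift; // the wrapped shl
    return .{ .result = shifted, .overflow = (shifted >> shift) != lhs }; // shr, then compare
}

test "shl overflow detected by shifting back" {
    try std.testing.expect(!shlWithOverflowU32(1, 4).overflow);
    try std.testing.expect(shlWithOverflowU32(0x8000_0001, 1).overflow);
    try std.testing.expectEqual(@as(u32, 2), shlWithOverflowU32(0x8000_0001, 1).result);
}
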
-fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
- const extra = func.air.extraData(Air.Bin, ty_pl.payload).data;
+fn airMulWithOverflow(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
+ const extra = cg.air.extraData(Air.Bin, ty_pl.payload).data;
- const lhs = try func.resolveInst(extra.lhs);
- const rhs = try func.resolveInst(extra.rhs);
- const ty = func.typeOf(extra.lhs);
- const pt = func.pt;
+ const lhs = try cg.resolveInst(extra.lhs);
+ const rhs = try cg.resolveInst(extra.rhs);
+ const ty = cg.typeOf(extra.lhs);
+ const pt = cg.pt;
const zcu = pt.zcu;
if (ty.zigTypeTag(zcu) == .vector) {
- return func.fail("TODO: Implement overflow arithmetic for vectors", .{});
+ return cg.fail("TODO: Implement overflow arithmetic for vectors", .{});
}
    // This local stores whether the operation overflowed. As it's zero-initialized,
    // we only need to update it when an overflow (or underflow) occurred.
- var overflow_bit = try func.ensureAllocLocal(Type.u1);
- defer overflow_bit.free(func);
+ var overflow_bit = try cg.ensureAllocLocal(Type.u1);
+ defer overflow_bit.free(cg);
const int_info = ty.intInfo(zcu);
const wasm_bits = toWasmBits(int_info.bits) orelse {
- return func.fail("TODO: Implement `@mulWithOverflow` for integer bitsize: {d}", .{int_info.bits});
+ return cg.fail("TODO: Implement `@mulWithOverflow` for integer bitsize: {d}", .{int_info.bits});
};
const zero: WValue = switch (wasm_bits) {
@@ -6176,248 +6054,250 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
    // for 32-bit integers we upcast the operands to a 64-bit integer
const mul = if (wasm_bits == 32) blk: {
const new_ty = if (int_info.signedness == .signed) Type.i64 else Type.u64;
- const lhs_upcast = try func.intcast(lhs, ty, new_ty);
- const rhs_upcast = try func.intcast(rhs, ty, new_ty);
- const bin_op = try (try func.binOp(lhs_upcast, rhs_upcast, new_ty, .mul)).toLocal(func, new_ty);
- const res = try (try func.trunc(bin_op, ty, new_ty)).toLocal(func, ty);
- const res_upcast = try func.intcast(res, ty, new_ty);
- _ = try func.cmp(res_upcast, bin_op, new_ty, .neq);
- try func.addLabel(.local_set, overflow_bit.local.value);
+ const lhs_upcast = try cg.intcast(lhs, ty, new_ty);
+ const rhs_upcast = try cg.intcast(rhs, ty, new_ty);
+ const bin_op = try (try cg.binOp(lhs_upcast, rhs_upcast, new_ty, .mul)).toLocal(cg, new_ty);
+ const res = try (try cg.trunc(bin_op, ty, new_ty)).toLocal(cg, ty);
+ const res_upcast = try cg.intcast(res, ty, new_ty);
+ _ = try cg.cmp(res_upcast, bin_op, new_ty, .neq);
+ try cg.addLocal(.local_set, overflow_bit.local.value);
break :blk res;
} else if (wasm_bits == 64) blk: {
const new_ty = if (int_info.signedness == .signed) Type.i128 else Type.u128;
- const lhs_upcast = try func.intcast(lhs, ty, new_ty);
- const rhs_upcast = try func.intcast(rhs, ty, new_ty);
- const bin_op = try (try func.binOp(lhs_upcast, rhs_upcast, new_ty, .mul)).toLocal(func, new_ty);
- const res = try (try func.trunc(bin_op, ty, new_ty)).toLocal(func, ty);
- const res_upcast = try func.intcast(res, ty, new_ty);
- _ = try func.cmp(res_upcast, bin_op, new_ty, .neq);
- try func.addLabel(.local_set, overflow_bit.local.value);
+ const lhs_upcast = try cg.intcast(lhs, ty, new_ty);
+ const rhs_upcast = try cg.intcast(rhs, ty, new_ty);
+ const bin_op = try (try cg.binOp(lhs_upcast, rhs_upcast, new_ty, .mul)).toLocal(cg, new_ty);
+ const res = try (try cg.trunc(bin_op, ty, new_ty)).toLocal(cg, ty);
+ const res_upcast = try cg.intcast(res, ty, new_ty);
+ _ = try cg.cmp(res_upcast, bin_op, new_ty, .neq);
+ try cg.addLocal(.local_set, overflow_bit.local.value);
break :blk res;
} else if (int_info.bits == 128 and int_info.signedness == .unsigned) blk: {
- var lhs_lsb = try (try func.load(lhs, Type.u64, 0)).toLocal(func, Type.u64);
- defer lhs_lsb.free(func);
- var lhs_msb = try (try func.load(lhs, Type.u64, 8)).toLocal(func, Type.u64);
- defer lhs_msb.free(func);
- var rhs_lsb = try (try func.load(rhs, Type.u64, 0)).toLocal(func, Type.u64);
- defer rhs_lsb.free(func);
- var rhs_msb = try (try func.load(rhs, Type.u64, 8)).toLocal(func, Type.u64);
- defer rhs_msb.free(func);
-
- const cross_1 = try func.callIntrinsic(
- "__multi3",
+ var lhs_lsb = try (try cg.load(lhs, Type.u64, 0)).toLocal(cg, Type.u64);
+ defer lhs_lsb.free(cg);
+ var lhs_msb = try (try cg.load(lhs, Type.u64, 8)).toLocal(cg, Type.u64);
+ defer lhs_msb.free(cg);
+ var rhs_lsb = try (try cg.load(rhs, Type.u64, 0)).toLocal(cg, Type.u64);
+ defer rhs_lsb.free(cg);
+ var rhs_msb = try (try cg.load(rhs, Type.u64, 8)).toLocal(cg, Type.u64);
+ defer rhs_msb.free(cg);
+
+ const cross_1 = try cg.callIntrinsic(
+ .__multi3,
&[_]InternPool.Index{.i64_type} ** 4,
Type.i128,
&.{ lhs_msb, zero, rhs_lsb, zero },
);
- const cross_2 = try func.callIntrinsic(
- "__multi3",
+ const cross_2 = try cg.callIntrinsic(
+ .__multi3,
&[_]InternPool.Index{.i64_type} ** 4,
Type.i128,
&.{ rhs_msb, zero, lhs_lsb, zero },
);
- const mul_lsb = try func.callIntrinsic(
- "__multi3",
+ const mul_lsb = try cg.callIntrinsic(
+ .__multi3,
&[_]InternPool.Index{.i64_type} ** 4,
Type.i128,
&.{ rhs_lsb, zero, lhs_lsb, zero },
);
- const rhs_msb_not_zero = try func.cmp(rhs_msb, zero, Type.u64, .neq);
- const lhs_msb_not_zero = try func.cmp(lhs_msb, zero, Type.u64, .neq);
- const both_msb_not_zero = try func.binOp(rhs_msb_not_zero, lhs_msb_not_zero, Type.bool, .@"and");
- const cross_1_msb = try func.load(cross_1, Type.u64, 8);
- const cross_1_msb_not_zero = try func.cmp(cross_1_msb, zero, Type.u64, .neq);
- const cond_1 = try func.binOp(both_msb_not_zero, cross_1_msb_not_zero, Type.bool, .@"or");
- const cross_2_msb = try func.load(cross_2, Type.u64, 8);
- const cross_2_msb_not_zero = try func.cmp(cross_2_msb, zero, Type.u64, .neq);
- const cond_2 = try func.binOp(cond_1, cross_2_msb_not_zero, Type.bool, .@"or");
-
- const cross_1_lsb = try func.load(cross_1, Type.u64, 0);
- const cross_2_lsb = try func.load(cross_2, Type.u64, 0);
- const cross_add = try func.binOp(cross_1_lsb, cross_2_lsb, Type.u64, .add);
-
- var mul_lsb_msb = try (try func.load(mul_lsb, Type.u64, 8)).toLocal(func, Type.u64);
- defer mul_lsb_msb.free(func);
- var all_add = try (try func.binOp(cross_add, mul_lsb_msb, Type.u64, .add)).toLocal(func, Type.u64);
- defer all_add.free(func);
- const add_overflow = try func.cmp(all_add, mul_lsb_msb, Type.u64, .lt);
+ const rhs_msb_not_zero = try cg.cmp(rhs_msb, zero, Type.u64, .neq);
+ const lhs_msb_not_zero = try cg.cmp(lhs_msb, zero, Type.u64, .neq);
+ const both_msb_not_zero = try cg.binOp(rhs_msb_not_zero, lhs_msb_not_zero, Type.bool, .@"and");
+ const cross_1_msb = try cg.load(cross_1, Type.u64, 8);
+ const cross_1_msb_not_zero = try cg.cmp(cross_1_msb, zero, Type.u64, .neq);
+ const cond_1 = try cg.binOp(both_msb_not_zero, cross_1_msb_not_zero, Type.bool, .@"or");
+ const cross_2_msb = try cg.load(cross_2, Type.u64, 8);
+ const cross_2_msb_not_zero = try cg.cmp(cross_2_msb, zero, Type.u64, .neq);
+ const cond_2 = try cg.binOp(cond_1, cross_2_msb_not_zero, Type.bool, .@"or");
+
+ const cross_1_lsb = try cg.load(cross_1, Type.u64, 0);
+ const cross_2_lsb = try cg.load(cross_2, Type.u64, 0);
+ const cross_add = try cg.binOp(cross_1_lsb, cross_2_lsb, Type.u64, .add);
+
+ var mul_lsb_msb = try (try cg.load(mul_lsb, Type.u64, 8)).toLocal(cg, Type.u64);
+ defer mul_lsb_msb.free(cg);
+ var all_add = try (try cg.binOp(cross_add, mul_lsb_msb, Type.u64, .add)).toLocal(cg, Type.u64);
+ defer all_add.free(cg);
+ const add_overflow = try cg.cmp(all_add, mul_lsb_msb, Type.u64, .lt);
// result for overflow bit
- _ = try func.binOp(cond_2, add_overflow, Type.bool, .@"or");
- try func.addLabel(.local_set, overflow_bit.local.value);
-
- const tmp_result = try func.allocStack(Type.u128);
- try func.emitWValue(tmp_result);
- const mul_lsb_lsb = try func.load(mul_lsb, Type.u64, 0);
- try func.store(.stack, mul_lsb_lsb, Type.u64, tmp_result.offset());
- try func.store(tmp_result, all_add, Type.u64, 8);
+ _ = try cg.binOp(cond_2, add_overflow, Type.bool, .@"or");
+ try cg.addLocal(.local_set, overflow_bit.local.value);
+
+ const tmp_result = try cg.allocStack(Type.u128);
+ try cg.emitWValue(tmp_result);
+ const mul_lsb_lsb = try cg.load(mul_lsb, Type.u64, 0);
+ try cg.store(.stack, mul_lsb_lsb, Type.u64, tmp_result.offset());
+ try cg.store(tmp_result, all_add, Type.u64, 8);
break :blk tmp_result;
} else if (int_info.bits == 128 and int_info.signedness == .signed) blk: {
- const overflow_ret = try func.allocStack(Type.i32);
- const res = try func.callIntrinsic(
- "__muloti4",
+ const overflow_ret = try cg.allocStack(Type.i32);
+ const res = try cg.callIntrinsic(
+ .__muloti4,
&[_]InternPool.Index{ .i128_type, .i128_type, .usize_type },
Type.i128,
&.{ lhs, rhs, overflow_ret },
);
- _ = try func.load(overflow_ret, Type.i32, 0);
- try func.addLabel(.local_set, overflow_bit.local.value);
+ _ = try cg.load(overflow_ret, Type.i32, 0);
+ try cg.addLocal(.local_set, overflow_bit.local.value);
break :blk res;
- } else return func.fail("TODO: @mulWithOverflow for {}", .{ty.fmt(pt)});
- var bin_op_local = try mul.toLocal(func, ty);
- defer bin_op_local.free(func);
+ } else return cg.fail("TODO: @mulWithOverflow for {}", .{ty.fmt(pt)});
+ var bin_op_local = try mul.toLocal(cg, ty);
+ defer bin_op_local.free(cg);
- const result = try func.allocStack(func.typeOfIndex(inst));
+ const result = try cg.allocStack(cg.typeOfIndex(inst));
const offset: u32 = @intCast(ty.abiSize(zcu));
- try func.store(result, bin_op_local, ty, 0);
- try func.store(result, overflow_bit, Type.u1, offset);
+ try cg.store(result, bin_op_local, ty, 0);
+ try cg.store(result, overflow_bit, Type.u1, offset);
- return func.finishAir(inst, result, &.{ extra.lhs, extra.rhs });
+ return cg.finishAir(inst, result, &.{ extra.lhs, extra.rhs });
}
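
// Illustrative sketch (not part of this patch): the unsigned 128-bit branch
// above splits both operands into 64-bit halves, forms the three partial
// products with __multi3 (hi*lo, lo*hi, lo*lo), flags overflow when both high
// halves are non-zero, when a cross product spills past 64 bits, or when the
// final high-half addition carries, and assembles the low 128 bits of the
// result from the pieces. A plain-Zig model of the same decomposition:
const std = @import("std");

fn mulWithOverflowU128(lhs: u128, rhs: u128) struct { result: u128, overflow: bool } {
    const lhs_lo: u128 = @as(u64, @truncate(lhs));
    const lhs_hi: u128 = @as(u64, @truncate(lhs >> 64));
    const rhs_lo: u128 = @as(u64, @truncate(rhs));
    const rhs_hi: u128 = @as(u64, @truncate(rhs >> 64));

    const cross_1 = lhs_hi * rhs_lo; // __multi3(lhs_msb, rhs_lsb)
    const cross_2 = rhs_hi * lhs_lo; // __multi3(rhs_msb, lhs_lsb)
    const mul_lsb = lhs_lo * rhs_lo; // __multi3(lhs_lsb, rhs_lsb)

    var overflow = (lhs_hi != 0 and rhs_hi != 0) or
        (cross_1 >> 64) != 0 or (cross_2 >> 64) != 0;

    const mul_lsb_msb: u64 = @truncate(mul_lsb >> 64);
    const cross_add = @as(u64, @truncate(cross_1)) +% @as(u64, @truncate(cross_2));
    const all_add = cross_add +% mul_lsb_msb;
    if (all_add < mul_lsb_msb) overflow = true; // carry out of the high half

    const mul_lsb_lsb: u64 = @truncate(mul_lsb);
    return .{ .result = (@as(u128, all_add) << 64) | mul_lsb_lsb, .overflow = overflow };
}

test "unsigned 128-bit multiply with overflow from 64-bit pieces" {
    const a = mulWithOverflowU128(10, 20);
    try std.testing.expectEqual(@as(u128, 200), a.result);
    try std.testing.expect(!a.overflow);

    const b = mulWithOverflowU128(@as(u128, 1) << 100, @as(u128, 1) << 100);
    try std.testing.expect(b.overflow);
}
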
-fn airMaxMin(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
- assert(op == .max or op == .min);
- const pt = func.pt;
- const zcu = pt.zcu;
- const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+fn airMaxMin(
+ cg: *CodeGen,
+ inst: Air.Inst.Index,
+ op: enum { fmax, fmin },
+ cmp_op: std.math.CompareOperator,
+) InnerError!void {
+ const zcu = cg.pt.zcu;
+ const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
- const ty = func.typeOfIndex(inst);
+ const ty = cg.typeOfIndex(inst);
if (ty.zigTypeTag(zcu) == .vector) {
- return func.fail("TODO: `@maximum` and `@minimum` for vectors", .{});
+ return cg.fail("TODO: `@maximum` and `@minimum` for vectors", .{});
}
if (ty.abiSize(zcu) > 16) {
- return func.fail("TODO: `@maximum` and `@minimum` for types larger than 16 bytes", .{});
+ return cg.fail("TODO: `@maximum` and `@minimum` for types larger than 16 bytes", .{});
}
- const lhs = try func.resolveInst(bin_op.lhs);
- const rhs = try func.resolveInst(bin_op.rhs);
+ const lhs = try cg.resolveInst(bin_op.lhs);
+ const rhs = try cg.resolveInst(bin_op.rhs);
if (ty.zigTypeTag(zcu) == .float) {
- var fn_name_buf: [64]u8 = undefined;
- const float_bits = ty.floatBits(func.target.*);
- const fn_name = std.fmt.bufPrint(&fn_name_buf, "{s}f{s}{s}", .{
- target_util.libcFloatPrefix(float_bits),
- @tagName(op),
- target_util.libcFloatSuffix(float_bits),
- }) catch unreachable;
- const result = try func.callIntrinsic(fn_name, &.{ ty.ip_index, ty.ip_index }, ty, &.{ lhs, rhs });
- try func.lowerToStack(result);
+ const intrinsic = switch (op) {
+ inline .fmin, .fmax => |ct_op| switch (ty.floatBits(cg.target.*)) {
+ inline 16, 32, 64, 80, 128 => |bits| @field(
+ Mir.Intrinsic,
+ libcFloatPrefix(bits) ++ @tagName(ct_op) ++ libcFloatSuffix(bits),
+ ),
+ else => unreachable,
+ },
+ };
+ const result = try cg.callIntrinsic(intrinsic, &.{ ty.ip_index, ty.ip_index }, ty, &.{ lhs, rhs });
+ try cg.lowerToStack(result);
} else {
// operands to select from
- try func.lowerToStack(lhs);
- try func.lowerToStack(rhs);
- _ = try func.cmp(lhs, rhs, ty, if (op == .max) .gt else .lt);
+ try cg.lowerToStack(lhs);
+ try cg.lowerToStack(rhs);
+ _ = try cg.cmp(lhs, rhs, ty, cmp_op);
        // based on the result of the comparison, return operand 0 or 1.
- try func.addTag(.select);
+ try cg.addTag(.select);
}
- return func.finishAir(inst, .stack, &.{ bin_op.lhs, bin_op.rhs });
+ return cg.finishAir(inst, .stack, &.{ bin_op.lhs, bin_op.rhs });
}
-fn airMulAdd(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const pt = func.pt;
- const zcu = pt.zcu;
- const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
- const bin_op = func.air.extraData(Air.Bin, pl_op.payload).data;
+fn airMulAdd(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const zcu = cg.pt.zcu;
+ const pl_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
+ const bin_op = cg.air.extraData(Air.Bin, pl_op.payload).data;
- const ty = func.typeOfIndex(inst);
+ const ty = cg.typeOfIndex(inst);
if (ty.zigTypeTag(zcu) == .vector) {
- return func.fail("TODO: `@mulAdd` for vectors", .{});
+ return cg.fail("TODO: `@mulAdd` for vectors", .{});
}
- const addend = try func.resolveInst(pl_op.operand);
- const lhs = try func.resolveInst(bin_op.lhs);
- const rhs = try func.resolveInst(bin_op.rhs);
+ const addend = try cg.resolveInst(pl_op.operand);
+ const lhs = try cg.resolveInst(bin_op.lhs);
+ const rhs = try cg.resolveInst(bin_op.rhs);
- const result = if (ty.floatBits(func.target.*) == 16) fl_result: {
- const rhs_ext = try func.fpext(rhs, ty, Type.f32);
- const lhs_ext = try func.fpext(lhs, ty, Type.f32);
- const addend_ext = try func.fpext(addend, ty, Type.f32);
+ const result = if (ty.floatBits(cg.target.*) == 16) fl_result: {
+ const rhs_ext = try cg.fpext(rhs, ty, Type.f32);
+ const lhs_ext = try cg.fpext(lhs, ty, Type.f32);
+ const addend_ext = try cg.fpext(addend, ty, Type.f32);
// call to compiler-rt `fn fmaf(f32, f32, f32) f32`
- const result = try func.callIntrinsic(
- "fmaf",
+ const result = try cg.callIntrinsic(
+ .fmaf,
&.{ .f32_type, .f32_type, .f32_type },
Type.f32,
&.{ rhs_ext, lhs_ext, addend_ext },
);
- break :fl_result try func.fptrunc(result, Type.f32, ty);
+ break :fl_result try cg.fptrunc(result, Type.f32, ty);
} else result: {
- const mul_result = try func.binOp(lhs, rhs, ty, .mul);
- break :result try func.binOp(mul_result, addend, ty, .add);
+ const mul_result = try cg.binOp(lhs, rhs, ty, .mul);
+ break :result try cg.binOp(mul_result, addend, ty, .add);
};
- return func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs, pl_op.operand });
+ return cg.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs, pl_op.operand });
}
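// Illustrative note on the lowering above: for f16 the operands are widened
// to f32, compiler-rt's `fmaf` computes the fused multiply-add, and the
// result is truncated back to f16 (the rhs/lhs order passed to `fmaf` is
// harmless because the multiplication is commutative). Other widths fall
// back to a plain `mul` followed by `add`, so the intermediate product is
// rounded rather than fused.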
-fn airClz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const pt = func.pt;
- const zcu = pt.zcu;
- const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+fn airClz(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const zcu = cg.pt.zcu;
+ const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
- const ty = func.typeOf(ty_op.operand);
+ const ty = cg.typeOf(ty_op.operand);
if (ty.zigTypeTag(zcu) == .vector) {
- return func.fail("TODO: `@clz` for vectors", .{});
+ return cg.fail("TODO: `@clz` for vectors", .{});
}
- const operand = try func.resolveInst(ty_op.operand);
+ const operand = try cg.resolveInst(ty_op.operand);
const int_info = ty.intInfo(zcu);
const wasm_bits = toWasmBits(int_info.bits) orelse {
- return func.fail("TODO: `@clz` for integers with bitsize '{d}'", .{int_info.bits});
+ return cg.fail("TODO: `@clz` for integers with bitsize '{d}'", .{int_info.bits});
};
switch (wasm_bits) {
32 => {
- try func.emitWValue(operand);
- try func.addTag(.i32_clz);
+ try cg.emitWValue(operand);
+ try cg.addTag(.i32_clz);
},
64 => {
- try func.emitWValue(operand);
- try func.addTag(.i64_clz);
- try func.addTag(.i32_wrap_i64);
+ try cg.emitWValue(operand);
+ try cg.addTag(.i64_clz);
+ try cg.addTag(.i32_wrap_i64);
},
128 => {
- var msb = try (try func.load(operand, Type.u64, 8)).toLocal(func, Type.u64);
- defer msb.free(func);
-
- try func.emitWValue(msb);
- try func.addTag(.i64_clz);
- _ = try func.load(operand, Type.u64, 0);
- try func.addTag(.i64_clz);
- try func.emitWValue(.{ .imm64 = 64 });
- try func.addTag(.i64_add);
- _ = try func.cmp(msb, .{ .imm64 = 0 }, Type.u64, .neq);
- try func.addTag(.select);
- try func.addTag(.i32_wrap_i64);
+ var msb = try (try cg.load(operand, Type.u64, 8)).toLocal(cg, Type.u64);
+ defer msb.free(cg);
+
+ try cg.emitWValue(msb);
+ try cg.addTag(.i64_clz);
+ _ = try cg.load(operand, Type.u64, 0);
+ try cg.addTag(.i64_clz);
+ try cg.emitWValue(.{ .imm64 = 64 });
+ try cg.addTag(.i64_add);
+ _ = try cg.cmp(msb, .{ .imm64 = 0 }, Type.u64, .neq);
+ try cg.addTag(.select);
+ try cg.addTag(.i32_wrap_i64);
},
else => unreachable,
}
if (wasm_bits != int_info.bits) {
- try func.emitWValue(.{ .imm32 = wasm_bits - int_info.bits });
- try func.addTag(.i32_sub);
+ try cg.emitWValue(.{ .imm32 = wasm_bits - int_info.bits });
+ try cg.addTag(.i32_sub);
}
- return func.finishAir(inst, .stack, &.{ty_op.operand});
+ return cg.finishAir(inst, .stack, &.{ty_op.operand});
}
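// Worked examples (illustrative): for a 128-bit operand the count is
// `clz(msb)` when the upper 64 bits are nonzero, otherwise `clz(lsb) + 64`,
// so @clz(@as(u128, 1)) == 63 + 64 == 127. For widths below the wasm word
// size the value sits in the low bits of the local, so the final `i32_sub`
// of (wasm_bits - int_bits) removes the padding,
// e.g. @clz(@as(u24, 1)) == 31 - 8 == 23.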
-fn airCtz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const pt = func.pt;
- const zcu = pt.zcu;
- const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+fn airCtz(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const zcu = cg.pt.zcu;
+ const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
- const ty = func.typeOf(ty_op.operand);
+ const ty = cg.typeOf(ty_op.operand);
if (ty.zigTypeTag(zcu) == .vector) {
- return func.fail("TODO: `@ctz` for vectors", .{});
+ return cg.fail("TODO: `@ctz` for vectors", .{});
}
- const operand = try func.resolveInst(ty_op.operand);
+ const operand = try cg.resolveInst(ty_op.operand);
const int_info = ty.intInfo(zcu);
const wasm_bits = toWasmBits(int_info.bits) orelse {
- return func.fail("TODO: `@clz` for integers with bitsize '{d}'", .{int_info.bits});
+        return cg.fail("TODO: `@ctz` for integers with bitsize '{d}'", .{int_info.bits});
};
switch (wasm_bits) {
@@ -6425,131 +6305,108 @@ fn airCtz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
if (wasm_bits != int_info.bits) {
const val: u32 = @as(u32, 1) << @as(u5, @intCast(int_info.bits));
// leave value on the stack
- _ = try func.binOp(operand, .{ .imm32 = val }, ty, .@"or");
- } else try func.emitWValue(operand);
- try func.addTag(.i32_ctz);
+ _ = try cg.binOp(operand, .{ .imm32 = val }, ty, .@"or");
+ } else try cg.emitWValue(operand);
+ try cg.addTag(.i32_ctz);
},
64 => {
if (wasm_bits != int_info.bits) {
const val: u64 = @as(u64, 1) << @as(u6, @intCast(int_info.bits));
// leave value on the stack
- _ = try func.binOp(operand, .{ .imm64 = val }, ty, .@"or");
- } else try func.emitWValue(operand);
- try func.addTag(.i64_ctz);
- try func.addTag(.i32_wrap_i64);
+ _ = try cg.binOp(operand, .{ .imm64 = val }, ty, .@"or");
+ } else try cg.emitWValue(operand);
+ try cg.addTag(.i64_ctz);
+ try cg.addTag(.i32_wrap_i64);
},
128 => {
- var lsb = try (try func.load(operand, Type.u64, 0)).toLocal(func, Type.u64);
- defer lsb.free(func);
+ var lsb = try (try cg.load(operand, Type.u64, 0)).toLocal(cg, Type.u64);
+ defer lsb.free(cg);
- try func.emitWValue(lsb);
- try func.addTag(.i64_ctz);
- _ = try func.load(operand, Type.u64, 8);
+ try cg.emitWValue(lsb);
+ try cg.addTag(.i64_ctz);
+ _ = try cg.load(operand, Type.u64, 8);
if (wasm_bits != int_info.bits) {
- try func.addImm64(@as(u64, 1) << @as(u6, @intCast(int_info.bits - 64)));
- try func.addTag(.i64_or);
+ try cg.addImm64(@as(u64, 1) << @as(u6, @intCast(int_info.bits - 64)));
+ try cg.addTag(.i64_or);
}
- try func.addTag(.i64_ctz);
- try func.addImm64(64);
+ try cg.addTag(.i64_ctz);
+ try cg.addImm64(64);
if (wasm_bits != int_info.bits) {
- try func.addTag(.i64_or);
+ try cg.addTag(.i64_or);
} else {
- try func.addTag(.i64_add);
+ try cg.addTag(.i64_add);
}
- _ = try func.cmp(lsb, .{ .imm64 = 0 }, Type.u64, .neq);
- try func.addTag(.select);
- try func.addTag(.i32_wrap_i64);
+ _ = try cg.cmp(lsb, .{ .imm64 = 0 }, Type.u64, .neq);
+ try cg.addTag(.select);
+ try cg.addTag(.i32_wrap_i64);
},
else => unreachable,
}
- return func.finishAir(inst, .stack, &.{ty_op.operand});
+ return cg.finishAir(inst, .stack, &.{ty_op.operand});
}
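// Worked examples (illustrative): for widths below the wasm word size a
// sentinel bit is OR-ed in at position `int_info.bits`, so a zero operand
// still yields the type width, e.g. @ctz(@as(u24, 0)) counts up to the
// sentinel at bit 24 and returns 24. For 128-bit operands the result is
// `ctz(lsb)` when the low 64 bits are nonzero, otherwise `ctz(msb) + 64`.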
-fn airDbgStmt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- if (func.debug_output != .dwarf) return func.finishAir(inst, .none, &.{});
-
- const dbg_stmt = func.air.instructions.items(.data)[@intFromEnum(inst)].dbg_stmt;
- try func.addInst(.{ .tag = .dbg_line, .data = .{
- .payload = try func.addExtra(Mir.DbgLineColumn{
+fn airDbgStmt(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const dbg_stmt = cg.air.instructions.items(.data)[@intFromEnum(inst)].dbg_stmt;
+ try cg.addInst(.{ .tag = .dbg_line, .data = .{
+ .payload = try cg.addExtra(Mir.DbgLineColumn{
.line = dbg_stmt.line,
.column = dbg_stmt.column,
}),
} });
- return func.finishAir(inst, .none, &.{});
+ return cg.finishAir(inst, .none, &.{});
}
-fn airDbgInlineBlock(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
- const extra = func.air.extraData(Air.DbgInlineBlock, ty_pl.payload);
+fn airDbgInlineBlock(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
+ const extra = cg.air.extraData(Air.DbgInlineBlock, ty_pl.payload);
// TODO
- try func.lowerBlock(inst, ty_pl.ty.toType(), @ptrCast(func.air.extra[extra.end..][0..extra.data.body_len]));
+ try cg.lowerBlock(inst, ty_pl.ty.toType(), @ptrCast(cg.air.extra[extra.end..][0..extra.data.body_len]));
}
fn airDbgVar(
- func: *CodeGen,
+ cg: *CodeGen,
inst: Air.Inst.Index,
local_tag: link.File.Dwarf.WipNav.LocalTag,
is_ptr: bool,
) InnerError!void {
_ = is_ptr;
- if (func.debug_output != .dwarf) return func.finishAir(inst, .none, &.{});
-
- const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
- const ty = func.typeOf(pl_op.operand);
- const operand = try func.resolveInst(pl_op.operand);
-
- log.debug("airDbgVar: %{d}: {}, {}", .{ inst, ty.fmtDebug(), operand });
-
- const name: Air.NullTerminatedString = @enumFromInt(pl_op.payload);
- log.debug(" var name = ({s})", .{name.toSlice(func.air)});
-
- const loc: link.File.Dwarf.Loc = switch (operand) {
- .local => |local| .{ .wasm_ext = .{ .local = local.value } },
- else => blk: {
- log.debug("TODO generate debug info for {}", .{operand});
- break :blk .empty;
- },
- };
- try func.debug_output.dwarf.genLocalDebugInfo(local_tag, name.toSlice(func.air), ty, loc);
-
- return func.finishAir(inst, .none, &.{});
+ _ = local_tag;
+ return cg.finishAir(inst, .none, &.{});
}
-fn airTry(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
- const err_union = try func.resolveInst(pl_op.operand);
- const extra = func.air.extraData(Air.Try, pl_op.payload);
- const body: []const Air.Inst.Index = @ptrCast(func.air.extra[extra.end..][0..extra.data.body_len]);
- const err_union_ty = func.typeOf(pl_op.operand);
- const result = try lowerTry(func, inst, err_union, body, err_union_ty, false);
- return func.finishAir(inst, result, &.{pl_op.operand});
+fn airTry(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const pl_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
+ const err_union = try cg.resolveInst(pl_op.operand);
+ const extra = cg.air.extraData(Air.Try, pl_op.payload);
+ const body: []const Air.Inst.Index = @ptrCast(cg.air.extra[extra.end..][0..extra.data.body_len]);
+ const err_union_ty = cg.typeOf(pl_op.operand);
+ const result = try lowerTry(cg, inst, err_union, body, err_union_ty, false);
+ return cg.finishAir(inst, result, &.{pl_op.operand});
}
-fn airTryPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const pt = func.pt;
- const zcu = pt.zcu;
- const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
- const extra = func.air.extraData(Air.TryPtr, ty_pl.payload);
- const err_union_ptr = try func.resolveInst(extra.data.ptr);
- const body: []const Air.Inst.Index = @ptrCast(func.air.extra[extra.end..][0..extra.data.body_len]);
- const err_union_ty = func.typeOf(extra.data.ptr).childType(zcu);
- const result = try lowerTry(func, inst, err_union_ptr, body, err_union_ty, true);
- return func.finishAir(inst, result, &.{extra.data.ptr});
+fn airTryPtr(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const zcu = cg.pt.zcu;
+ const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
+ const extra = cg.air.extraData(Air.TryPtr, ty_pl.payload);
+ const err_union_ptr = try cg.resolveInst(extra.data.ptr);
+ const body: []const Air.Inst.Index = @ptrCast(cg.air.extra[extra.end..][0..extra.data.body_len]);
+ const err_union_ty = cg.typeOf(extra.data.ptr).childType(zcu);
+ const result = try lowerTry(cg, inst, err_union_ptr, body, err_union_ty, true);
+ return cg.finishAir(inst, result, &.{extra.data.ptr});
}
fn lowerTry(
- func: *CodeGen,
+ cg: *CodeGen,
inst: Air.Inst.Index,
err_union: WValue,
body: []const Air.Inst.Index,
err_union_ty: Type,
operand_is_ptr: bool,
) InnerError!WValue {
- const pt = func.pt;
- const zcu = pt.zcu;
+ const zcu = cg.pt.zcu;
if (operand_is_ptr) {
- return func.fail("TODO: lowerTry for pointers", .{});
+ return cg.fail("TODO: lowerTry for pointers", .{});
}
const pl_ty = err_union_ty.errorUnionPayload(zcu);
@@ -6557,29 +6414,29 @@ fn lowerTry(
if (!err_union_ty.errorUnionSet(zcu).errorSetIsEmpty(zcu)) {
// Block we can jump out of when error is not set
- try func.startBlock(.block, wasm.block_empty);
+ try cg.startBlock(.block, .empty);
// check if the error tag is set for the error union.
- try func.emitWValue(err_union);
+ try cg.emitWValue(err_union);
if (pl_has_bits) {
const err_offset: u32 = @intCast(errUnionErrorOffset(pl_ty, zcu));
- try func.addMemArg(.i32_load16_u, .{
+ try cg.addMemArg(.i32_load16_u, .{
.offset = err_union.offset() + err_offset,
.alignment = @intCast(Type.anyerror.abiAlignment(zcu).toByteUnits().?),
});
}
- try func.addTag(.i32_eqz);
- try func.addLabel(.br_if, 0); // jump out of block when error is '0'
+ try cg.addTag(.i32_eqz);
+ try cg.addLabel(.br_if, 0); // jump out of block when error is '0'
- const liveness = func.liveness.getCondBr(inst);
- try func.branches.append(func.gpa, .{});
- try func.currentBranch().values.ensureUnusedCapacity(func.gpa, liveness.else_deaths.len + liveness.then_deaths.len);
+ const liveness = cg.liveness.getCondBr(inst);
+ try cg.branches.append(cg.gpa, .{});
+ try cg.currentBranch().values.ensureUnusedCapacity(cg.gpa, liveness.else_deaths.len + liveness.then_deaths.len);
defer {
- var branch = func.branches.pop();
- branch.deinit(func.gpa);
+ var branch = cg.branches.pop();
+ branch.deinit(cg.gpa);
}
- try func.genBody(body);
- try func.endBlock();
+ try cg.genBody(body);
+ try cg.endBlock();
}
// if we reach here it means error was not set, and we want the payload
@@ -6588,39 +6445,38 @@ fn lowerTry(
}
const pl_offset: u32 = @intCast(errUnionPayloadOffset(pl_ty, zcu));
- if (isByRef(pl_ty, pt, func.target.*)) {
- return buildPointerOffset(func, err_union, pl_offset, .new);
+ if (isByRef(pl_ty, zcu, cg.target)) {
+ return buildPointerOffset(cg, err_union, pl_offset, .new);
}
- const payload = try func.load(err_union, pl_ty, pl_offset);
- return payload.toLocal(func, pl_ty);
+ const payload = try cg.load(err_union, pl_ty, pl_offset);
+ return payload.toLocal(cg, pl_ty);
}
-fn airByteSwap(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const pt = func.pt;
- const zcu = pt.zcu;
- const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+fn airByteSwap(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const zcu = cg.pt.zcu;
+ const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
- const ty = func.typeOfIndex(inst);
- const operand = try func.resolveInst(ty_op.operand);
+ const ty = cg.typeOfIndex(inst);
+ const operand = try cg.resolveInst(ty_op.operand);
if (ty.zigTypeTag(zcu) == .vector) {
- return func.fail("TODO: @byteSwap for vectors", .{});
+ return cg.fail("TODO: @byteSwap for vectors", .{});
}
const int_info = ty.intInfo(zcu);
const wasm_bits = toWasmBits(int_info.bits) orelse {
- return func.fail("TODO: @byteSwap for integers with bitsize {d}", .{int_info.bits});
+ return cg.fail("TODO: @byteSwap for integers with bitsize {d}", .{int_info.bits});
};
    // swapping a single byte is a no-op
if (int_info.bits == 8) {
- return func.finishAir(inst, func.reuseOperand(ty_op.operand, operand), &.{ty_op.operand});
+ return cg.finishAir(inst, cg.reuseOperand(ty_op.operand, operand), &.{ty_op.operand});
}
const result = result: {
switch (wasm_bits) {
32 => {
- const intrin_ret = try func.callIntrinsic(
- "__bswapsi2",
+ const intrin_ret = try cg.callIntrinsic(
+ .__bswapsi2,
&.{.u32_type},
Type.u32,
&.{operand},
@@ -6628,11 +6484,11 @@ fn airByteSwap(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
break :result if (int_info.bits == 32)
intrin_ret
else
- try func.binOp(intrin_ret, .{ .imm32 = 32 - int_info.bits }, ty, .shr);
+ try cg.binOp(intrin_ret, .{ .imm32 = 32 - int_info.bits }, ty, .shr);
},
64 => {
- const intrin_ret = try func.callIntrinsic(
- "__bswapdi2",
+ const intrin_ret = try cg.callIntrinsic(
+ .__bswapdi2,
&.{.u64_type},
Type.u64,
&.{operand},
@@ -6640,61 +6496,60 @@ fn airByteSwap(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
break :result if (int_info.bits == 64)
intrin_ret
else
- try func.binOp(intrin_ret, .{ .imm64 = 64 - int_info.bits }, ty, .shr);
+ try cg.binOp(intrin_ret, .{ .imm64 = 64 - int_info.bits }, ty, .shr);
},
- else => return func.fail("TODO: @byteSwap for integers with bitsize {d}", .{int_info.bits}),
+ else => return cg.fail("TODO: @byteSwap for integers with bitsize {d}", .{int_info.bits}),
}
};
- return func.finishAir(inst, result, &.{ty_op.operand});
+ return cg.finishAir(inst, result, &.{ty_op.operand});
}
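// Worked example (illustrative): a u24 value is zero-extended into the
// 32-bit local, swapped with compiler-rt's `__bswapsi2`, then shifted right
// by 32 - 24 = 8 to drop the padding byte:
//   0x00AABBCC -> bswap -> 0xCCBBAA00 -> >> 8 -> 0x00CCBBAA.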
-fn airDiv(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+fn airDiv(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
- const ty = func.typeOfIndex(inst);
- const lhs = try func.resolveInst(bin_op.lhs);
- const rhs = try func.resolveInst(bin_op.rhs);
+ const ty = cg.typeOfIndex(inst);
+ const lhs = try cg.resolveInst(bin_op.lhs);
+ const rhs = try cg.resolveInst(bin_op.rhs);
- const result = try func.binOp(lhs, rhs, ty, .div);
- return func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
+ const result = try cg.binOp(lhs, rhs, ty, .div);
+ return cg.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
}
-fn airDivTrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+fn airDivTrunc(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
- const ty = func.typeOfIndex(inst);
- const lhs = try func.resolveInst(bin_op.lhs);
- const rhs = try func.resolveInst(bin_op.rhs);
+ const ty = cg.typeOfIndex(inst);
+ const lhs = try cg.resolveInst(bin_op.lhs);
+ const rhs = try cg.resolveInst(bin_op.rhs);
- const div_result = try func.binOp(lhs, rhs, ty, .div);
+ const div_result = try cg.binOp(lhs, rhs, ty, .div);
if (ty.isAnyFloat()) {
- const trunc_result = try func.floatOp(.trunc, ty, &.{div_result});
- return func.finishAir(inst, trunc_result, &.{ bin_op.lhs, bin_op.rhs });
+ const trunc_result = try cg.floatOp(.trunc, ty, &.{div_result});
+ return cg.finishAir(inst, trunc_result, &.{ bin_op.lhs, bin_op.rhs });
}
- return func.finishAir(inst, div_result, &.{ bin_op.lhs, bin_op.rhs });
+ return cg.finishAir(inst, div_result, &.{ bin_op.lhs, bin_op.rhs });
}
-fn airDivFloor(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+fn airDivFloor(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
- const pt = func.pt;
- const zcu = pt.zcu;
- const ty = func.typeOfIndex(inst);
- const lhs = try func.resolveInst(bin_op.lhs);
- const rhs = try func.resolveInst(bin_op.rhs);
+ const zcu = cg.pt.zcu;
+ const ty = cg.typeOfIndex(inst);
+ const lhs = try cg.resolveInst(bin_op.lhs);
+ const rhs = try cg.resolveInst(bin_op.rhs);
if (ty.isUnsignedInt(zcu)) {
- _ = try func.binOp(lhs, rhs, ty, .div);
+ _ = try cg.binOp(lhs, rhs, ty, .div);
} else if (ty.isSignedInt(zcu)) {
const int_bits = ty.intInfo(zcu).bits;
const wasm_bits = toWasmBits(int_bits) orelse {
- return func.fail("TODO: `@divFloor` for signed integers larger than 64 bits ({d} bits requested)", .{int_bits});
+ return cg.fail("TODO: `@divFloor` for signed integers larger than 64 bits ({d} bits requested)", .{int_bits});
};
if (wasm_bits > 64) {
- return func.fail("TODO: `@divFloor` for signed integers larger than 64 bits ({d} bits requested)", .{int_bits});
+ return cg.fail("TODO: `@divFloor` for signed integers larger than 64 bits ({d} bits requested)", .{int_bits});
}
const zero: WValue = switch (wasm_bits) {
@@ -6704,108 +6559,108 @@ fn airDivFloor(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
};
// tee leaves the value on the stack and stores it in a local.
- const quotient = try func.allocLocal(ty);
- _ = try func.binOp(lhs, rhs, ty, .div);
- try func.addLabel(.local_tee, quotient.local.value);
+ const quotient = try cg.allocLocal(ty);
+ _ = try cg.binOp(lhs, rhs, ty, .div);
+ try cg.addLocal(.local_tee, quotient.local.value);
// select takes a 32 bit value as the condition, so in the 64 bit case we use eqz to narrow
// the 64 bit value we want to use as the condition to 32 bits.
// This also inverts the condition (non 0 => 0, 0 => 1), so we put the adjusted and
// non-adjusted quotients on the stack in the opposite order for 32 vs 64 bits.
if (wasm_bits == 64) {
- try func.emitWValue(quotient);
+ try cg.emitWValue(quotient);
}
// 0 if the signs of rhs_wasm and lhs_wasm are the same, 1 otherwise.
- _ = try func.binOp(lhs, rhs, ty, .xor);
- _ = try func.cmp(.stack, zero, ty, .lt);
+ _ = try cg.binOp(lhs, rhs, ty, .xor);
+ _ = try cg.cmp(.stack, zero, ty, .lt);
switch (wasm_bits) {
32 => {
- try func.addTag(.i32_sub);
- try func.emitWValue(quotient);
+ try cg.addTag(.i32_sub);
+ try cg.emitWValue(quotient);
},
64 => {
- try func.addTag(.i64_extend_i32_u);
- try func.addTag(.i64_sub);
+ try cg.addTag(.i64_extend_i32_u);
+ try cg.addTag(.i64_sub);
},
else => unreachable,
}
- _ = try func.binOp(lhs, rhs, ty, .rem);
+ _ = try cg.binOp(lhs, rhs, ty, .rem);
if (wasm_bits == 64) {
- try func.addTag(.i64_eqz);
+ try cg.addTag(.i64_eqz);
}
- try func.addTag(.select);
+ try cg.addTag(.select);
// We need to zero the high bits because N bit comparisons consider all 32 or 64 bits, and
// expect all but the lowest N bits to be 0.
// TODO: Should we be zeroing the high bits here or should we be ignoring the high bits
// when performing comparisons?
if (int_bits != wasm_bits) {
- _ = try func.wrapOperand(.stack, ty);
+ _ = try cg.wrapOperand(.stack, ty);
}
} else {
- const float_bits = ty.floatBits(func.target.*);
+ const float_bits = ty.floatBits(cg.target.*);
if (float_bits > 64) {
- return func.fail("TODO: `@divFloor` for floats with bitsize: {d}", .{float_bits});
+ return cg.fail("TODO: `@divFloor` for floats with bitsize: {d}", .{float_bits});
}
const is_f16 = float_bits == 16;
- const lhs_wasm = if (is_f16) try func.fpext(lhs, Type.f16, Type.f32) else lhs;
- const rhs_wasm = if (is_f16) try func.fpext(rhs, Type.f16, Type.f32) else rhs;
+ const lhs_wasm = if (is_f16) try cg.fpext(lhs, Type.f16, Type.f32) else lhs;
+ const rhs_wasm = if (is_f16) try cg.fpext(rhs, Type.f16, Type.f32) else rhs;
- try func.emitWValue(lhs_wasm);
- try func.emitWValue(rhs_wasm);
+ try cg.emitWValue(lhs_wasm);
+ try cg.emitWValue(rhs_wasm);
switch (float_bits) {
16, 32 => {
- try func.addTag(.f32_div);
- try func.addTag(.f32_floor);
+ try cg.addTag(.f32_div);
+ try cg.addTag(.f32_floor);
},
64 => {
- try func.addTag(.f64_div);
- try func.addTag(.f64_floor);
+ try cg.addTag(.f64_div);
+ try cg.addTag(.f64_floor);
},
else => unreachable,
}
if (is_f16) {
- _ = try func.fptrunc(.stack, Type.f32, Type.f16);
+ _ = try cg.fptrunc(.stack, Type.f32, Type.f16);
}
}
- return func.finishAir(inst, .stack, &.{ bin_op.lhs, bin_op.rhs });
+ return cg.finishAir(inst, .stack, &.{ bin_op.lhs, bin_op.rhs });
}
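// Worked example of the signed-integer path above (illustrative):
// @divFloor(-7, 2): the truncating quotient is -3, `lhs ^ rhs` is negative
// so the sign-differs flag is 1, and the remainder -1 is nonzero, so the
// selected result is -3 - 1 == -4. When the remainder is zero (e.g. -6 / 2)
// the unadjusted quotient -3 is selected instead.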
-fn airRem(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+fn airRem(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
- const ty = func.typeOfIndex(inst);
- const lhs = try func.resolveInst(bin_op.lhs);
- const rhs = try func.resolveInst(bin_op.rhs);
+ const ty = cg.typeOfIndex(inst);
+ const lhs = try cg.resolveInst(bin_op.lhs);
+ const rhs = try cg.resolveInst(bin_op.rhs);
- const result = try func.binOp(lhs, rhs, ty, .rem);
+ const result = try cg.binOp(lhs, rhs, ty, .rem);
- return func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
+ return cg.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
}
/// Remainder after floor division, defined by:
/// @divFloor(a, b) * b + @mod(a, b) = a
-fn airMod(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+fn airMod(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
- const pt = func.pt;
+ const pt = cg.pt;
const zcu = pt.zcu;
- const ty = func.typeOfIndex(inst);
- const lhs = try func.resolveInst(bin_op.lhs);
- const rhs = try func.resolveInst(bin_op.rhs);
+ const ty = cg.typeOfIndex(inst);
+ const lhs = try cg.resolveInst(bin_op.lhs);
+ const rhs = try cg.resolveInst(bin_op.rhs);
const result = result: {
if (ty.isUnsignedInt(zcu)) {
- break :result try func.binOp(lhs, rhs, ty, .rem);
+ break :result try cg.binOp(lhs, rhs, ty, .rem);
}
if (ty.isSignedInt(zcu)) {
// The wasm rem instruction gives the remainder after truncating division (rounding towards
@@ -6814,153 +6669,152 @@ fn airMod(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// @mod(a, b) = @rem(@rem(a, b) + b, b)
const int_bits = ty.intInfo(zcu).bits;
const wasm_bits = toWasmBits(int_bits) orelse {
- return func.fail("TODO: `@mod` for signed integers larger than 64 bits ({d} bits requested)", .{int_bits});
+ return cg.fail("TODO: `@mod` for signed integers larger than 64 bits ({d} bits requested)", .{int_bits});
};
if (wasm_bits > 64) {
- return func.fail("TODO: `@mod` for signed integers larger than 64 bits ({d} bits requested)", .{int_bits});
+ return cg.fail("TODO: `@mod` for signed integers larger than 64 bits ({d} bits requested)", .{int_bits});
}
- _ = try func.binOp(lhs, rhs, ty, .rem);
- _ = try func.binOp(.stack, rhs, ty, .add);
- break :result try func.binOp(.stack, rhs, ty, .rem);
+ _ = try cg.binOp(lhs, rhs, ty, .rem);
+ _ = try cg.binOp(.stack, rhs, ty, .add);
+ break :result try cg.binOp(.stack, rhs, ty, .rem);
}
if (ty.isAnyFloat()) {
- const rem = try func.binOp(lhs, rhs, ty, .rem);
- const add = try func.binOp(rem, rhs, ty, .add);
- break :result try func.binOp(add, rhs, ty, .rem);
+ const rem = try cg.binOp(lhs, rhs, ty, .rem);
+ const add = try cg.binOp(rem, rhs, ty, .add);
+ break :result try cg.binOp(add, rhs, ty, .rem);
}
- return func.fail("TODO: @mod for {}", .{ty.fmt(pt)});
+ return cg.fail("TODO: @mod for {}", .{ty.fmt(pt)});
};
- return func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
+ return cg.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
}
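// Worked example of @mod via @rem above (illustrative), with a = -7, b = 3:
//   @rem(-7, 3) == -1;  (-1 + 3) == 2;  @rem(2, 3) == 2 == @mod(-7, 3),
// which satisfies the defining identity: @divFloor(-7, 3) * 3 + 2 == -7.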
-fn airSatMul(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+fn airSatMul(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
- const pt = func.pt;
+ const pt = cg.pt;
const zcu = pt.zcu;
- const ty = func.typeOfIndex(inst);
+ const ty = cg.typeOfIndex(inst);
const int_info = ty.intInfo(zcu);
const is_signed = int_info.signedness == .signed;
- const lhs = try func.resolveInst(bin_op.lhs);
- const rhs = try func.resolveInst(bin_op.rhs);
+ const lhs = try cg.resolveInst(bin_op.lhs);
+ const rhs = try cg.resolveInst(bin_op.rhs);
const wasm_bits = toWasmBits(int_info.bits) orelse {
- return func.fail("TODO: mul_sat for {}", .{ty.fmt(pt)});
+ return cg.fail("TODO: mul_sat for {}", .{ty.fmt(pt)});
};
switch (wasm_bits) {
32 => {
const upcast_ty: Type = if (is_signed) Type.i64 else Type.u64;
- const lhs_up = try func.intcast(lhs, ty, upcast_ty);
- const rhs_up = try func.intcast(rhs, ty, upcast_ty);
- var mul_res = try (try func.binOp(lhs_up, rhs_up, upcast_ty, .mul)).toLocal(func, upcast_ty);
- defer mul_res.free(func);
+ const lhs_up = try cg.intcast(lhs, ty, upcast_ty);
+ const rhs_up = try cg.intcast(rhs, ty, upcast_ty);
+ var mul_res = try (try cg.binOp(lhs_up, rhs_up, upcast_ty, .mul)).toLocal(cg, upcast_ty);
+ defer mul_res.free(cg);
if (is_signed) {
const imm_max: WValue = .{ .imm64 = ~@as(u64, 0) >> @intCast(64 - (int_info.bits - 1)) };
- try func.emitWValue(mul_res);
- try func.emitWValue(imm_max);
- _ = try func.cmp(mul_res, imm_max, upcast_ty, .lt);
- try func.addTag(.select);
+ try cg.emitWValue(mul_res);
+ try cg.emitWValue(imm_max);
+ _ = try cg.cmp(mul_res, imm_max, upcast_ty, .lt);
+ try cg.addTag(.select);
- var tmp = try func.allocLocal(upcast_ty);
- defer tmp.free(func);
- try func.addLabel(.local_set, tmp.local.value);
+ var tmp = try cg.allocLocal(upcast_ty);
+ defer tmp.free(cg);
+ try cg.addLocal(.local_set, tmp.local.value);
const imm_min: WValue = .{ .imm64 = ~@as(u64, 0) << @intCast(int_info.bits - 1) };
- try func.emitWValue(tmp);
- try func.emitWValue(imm_min);
- _ = try func.cmp(tmp, imm_min, upcast_ty, .gt);
- try func.addTag(.select);
+ try cg.emitWValue(tmp);
+ try cg.emitWValue(imm_min);
+ _ = try cg.cmp(tmp, imm_min, upcast_ty, .gt);
+ try cg.addTag(.select);
} else {
const imm_max: WValue = .{ .imm64 = ~@as(u64, 0) >> @intCast(64 - int_info.bits) };
- try func.emitWValue(mul_res);
- try func.emitWValue(imm_max);
- _ = try func.cmp(mul_res, imm_max, upcast_ty, .lt);
- try func.addTag(.select);
+ try cg.emitWValue(mul_res);
+ try cg.emitWValue(imm_max);
+ _ = try cg.cmp(mul_res, imm_max, upcast_ty, .lt);
+ try cg.addTag(.select);
}
- try func.addTag(.i32_wrap_i64);
+ try cg.addTag(.i32_wrap_i64);
},
64 => {
if (!(int_info.bits == 64 and int_info.signedness == .signed)) {
- return func.fail("TODO: mul_sat for {}", .{ty.fmt(pt)});
+ return cg.fail("TODO: mul_sat for {}", .{ty.fmt(pt)});
}
- const overflow_ret = try func.allocStack(Type.i32);
- _ = try func.callIntrinsic(
- "__mulodi4",
+ const overflow_ret = try cg.allocStack(Type.i32);
+ _ = try cg.callIntrinsic(
+ .__mulodi4,
&[_]InternPool.Index{ .i64_type, .i64_type, .usize_type },
Type.i64,
&.{ lhs, rhs, overflow_ret },
);
- const xor = try func.binOp(lhs, rhs, Type.i64, .xor);
- const sign_v = try func.binOp(xor, .{ .imm64 = 63 }, Type.i64, .shr);
- _ = try func.binOp(sign_v, .{ .imm64 = ~@as(u63, 0) }, Type.i64, .xor);
- _ = try func.load(overflow_ret, Type.i32, 0);
- try func.addTag(.i32_eqz);
- try func.addTag(.select);
+ const xor = try cg.binOp(lhs, rhs, Type.i64, .xor);
+ const sign_v = try cg.binOp(xor, .{ .imm64 = 63 }, Type.i64, .shr);
+ _ = try cg.binOp(sign_v, .{ .imm64 = ~@as(u63, 0) }, Type.i64, .xor);
+ _ = try cg.load(overflow_ret, Type.i32, 0);
+ try cg.addTag(.i32_eqz);
+ try cg.addTag(.select);
},
128 => {
if (!(int_info.bits == 128 and int_info.signedness == .signed)) {
- return func.fail("TODO: mul_sat for {}", .{ty.fmt(pt)});
+ return cg.fail("TODO: mul_sat for {}", .{ty.fmt(pt)});
}
- const overflow_ret = try func.allocStack(Type.i32);
- const ret = try func.callIntrinsic(
- "__muloti4",
+ const overflow_ret = try cg.allocStack(Type.i32);
+ const ret = try cg.callIntrinsic(
+ .__muloti4,
&[_]InternPool.Index{ .i128_type, .i128_type, .usize_type },
Type.i128,
&.{ lhs, rhs, overflow_ret },
);
- try func.lowerToStack(ret);
- const xor = try func.binOp(lhs, rhs, Type.i128, .xor);
- const sign_v = try func.binOp(xor, .{ .imm32 = 127 }, Type.i128, .shr);
+ try cg.lowerToStack(ret);
+ const xor = try cg.binOp(lhs, rhs, Type.i128, .xor);
+ const sign_v = try cg.binOp(xor, .{ .imm32 = 127 }, Type.i128, .shr);
// xor ~@as(u127, 0)
- try func.emitWValue(sign_v);
- const lsb = try func.load(sign_v, Type.u64, 0);
- _ = try func.binOp(lsb, .{ .imm64 = ~@as(u64, 0) }, Type.u64, .xor);
- try func.store(.stack, .stack, Type.u64, sign_v.offset());
- try func.emitWValue(sign_v);
- const msb = try func.load(sign_v, Type.u64, 8);
- _ = try func.binOp(msb, .{ .imm64 = ~@as(u63, 0) }, Type.u64, .xor);
- try func.store(.stack, .stack, Type.u64, sign_v.offset() + 8);
-
- try func.lowerToStack(sign_v);
- _ = try func.load(overflow_ret, Type.i32, 0);
- try func.addTag(.i32_eqz);
- try func.addTag(.select);
+ try cg.emitWValue(sign_v);
+ const lsb = try cg.load(sign_v, Type.u64, 0);
+ _ = try cg.binOp(lsb, .{ .imm64 = ~@as(u64, 0) }, Type.u64, .xor);
+ try cg.store(.stack, .stack, Type.u64, sign_v.offset());
+ try cg.emitWValue(sign_v);
+ const msb = try cg.load(sign_v, Type.u64, 8);
+ _ = try cg.binOp(msb, .{ .imm64 = ~@as(u63, 0) }, Type.u64, .xor);
+ try cg.store(.stack, .stack, Type.u64, sign_v.offset() + 8);
+
+ try cg.lowerToStack(sign_v);
+ _ = try cg.load(overflow_ret, Type.i32, 0);
+ try cg.addTag(.i32_eqz);
+ try cg.addTag(.select);
},
else => unreachable,
}
- return func.finishAir(inst, .stack, &.{ bin_op.lhs, bin_op.rhs });
+ return cg.finishAir(inst, .stack, &.{ bin_op.lhs, bin_op.rhs });
}
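// Worked example of the 32-bit saturating-multiply path above (illustrative):
// for i8, both operands are sign-extended to i64, multiplied exactly, then
// clamped to [-128, 127] before wrapping back, so 100 *| 2 yields 127 and
// -100 *| 2 yields -128. The 64- and 128-bit signed cases instead call
// compiler-rt's __mulodi4/__muloti4 and select the saturated bound when the
// returned overflow flag is set.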
-fn airSatBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
+fn airSatBinOp(cg: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
assert(op == .add or op == .sub);
- const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+ const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
- const pt = func.pt;
- const zcu = pt.zcu;
- const ty = func.typeOfIndex(inst);
- const lhs = try func.resolveInst(bin_op.lhs);
- const rhs = try func.resolveInst(bin_op.rhs);
+ const zcu = cg.pt.zcu;
+ const ty = cg.typeOfIndex(inst);
+ const lhs = try cg.resolveInst(bin_op.lhs);
+ const rhs = try cg.resolveInst(bin_op.rhs);
const int_info = ty.intInfo(zcu);
const is_signed = int_info.signedness == .signed;
if (int_info.bits > 64) {
- return func.fail("TODO: saturating arithmetic for integers with bitsize '{d}'", .{int_info.bits});
+ return cg.fail("TODO: saturating arithmetic for integers with bitsize '{d}'", .{int_info.bits});
}
if (is_signed) {
- const result = try signedSat(func, lhs, rhs, ty, op);
- return func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
+ const result = try signedSat(cg, lhs, rhs, ty, op);
+ return cg.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
}
const wasm_bits = toWasmBits(int_info.bits).?;
- var bin_result = try (try func.binOp(lhs, rhs, ty, op)).toLocal(func, ty);
- defer bin_result.free(func);
+ var bin_result = try (try cg.binOp(lhs, rhs, ty, op)).toLocal(cg, ty);
+ defer bin_result.free(cg);
if (wasm_bits != int_info.bits and op == .add) {
const val: u64 = @as(u64, @intCast((@as(u65, 1) << @as(u7, @intCast(int_info.bits))) - 1));
const imm_val: WValue = switch (wasm_bits) {
@@ -6969,25 +6823,25 @@ fn airSatBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
else => unreachable,
};
- try func.emitWValue(bin_result);
- try func.emitWValue(imm_val);
- _ = try func.cmp(bin_result, imm_val, ty, .lt);
+ try cg.emitWValue(bin_result);
+ try cg.emitWValue(imm_val);
+ _ = try cg.cmp(bin_result, imm_val, ty, .lt);
} else {
switch (wasm_bits) {
- 32 => try func.addImm32(if (op == .add) std.math.maxInt(u32) else 0),
- 64 => try func.addImm64(if (op == .add) std.math.maxInt(u64) else 0),
+ 32 => try cg.addImm32(if (op == .add) std.math.maxInt(u32) else 0),
+ 64 => try cg.addImm64(if (op == .add) std.math.maxInt(u64) else 0),
else => unreachable,
}
- try func.emitWValue(bin_result);
- _ = try func.cmp(bin_result, lhs, ty, if (op == .add) .lt else .gt);
+ try cg.emitWValue(bin_result);
+ _ = try cg.cmp(bin_result, lhs, ty, if (op == .add) .lt else .gt);
}
- try func.addTag(.select);
- return func.finishAir(inst, .stack, &.{ bin_op.lhs, bin_op.rhs });
+ try cg.addTag(.select);
+ return cg.finishAir(inst, .stack, &.{ bin_op.lhs, bin_op.rhs });
}
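// Worked examples of the unsigned path above (illustrative): for a sub-word
// type such as u8, the add cannot wrap inside the 32-bit local, so the
// result is simply min(result, 255), e.g. 200 +| 100 == 255. For full-width
// types the wrapped result is compared against lhs instead: a u32 add that
// wraps (result < lhs) selects maxInt(u32), and a sub that wraps
// (result > lhs) selects 0.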
-fn signedSat(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue {
- const pt = func.pt;
+fn signedSat(cg: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue {
+ const pt = cg.pt;
const zcu = pt.zcu;
const int_info = ty.intInfo(zcu);
const wasm_bits = toWasmBits(int_info.bits).?;
@@ -7007,92 +6861,92 @@ fn signedSat(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerEr
else => unreachable,
};
- var bin_result = try (try func.binOp(lhs, rhs, ext_ty, op)).toLocal(func, ext_ty);
+ var bin_result = try (try cg.binOp(lhs, rhs, ext_ty, op)).toLocal(cg, ext_ty);
if (!is_wasm_bits) {
- defer bin_result.free(func); // not returned in this branch
- try func.emitWValue(bin_result);
- try func.emitWValue(max_wvalue);
- _ = try func.cmp(bin_result, max_wvalue, ext_ty, .lt);
- try func.addTag(.select);
- try func.addLabel(.local_set, bin_result.local.value); // re-use local
-
- try func.emitWValue(bin_result);
- try func.emitWValue(min_wvalue);
- _ = try func.cmp(bin_result, min_wvalue, ext_ty, .gt);
- try func.addTag(.select);
- try func.addLabel(.local_set, bin_result.local.value); // re-use local
- return (try func.wrapOperand(bin_result, ty)).toLocal(func, ty);
+ defer bin_result.free(cg); // not returned in this branch
+ try cg.emitWValue(bin_result);
+ try cg.emitWValue(max_wvalue);
+ _ = try cg.cmp(bin_result, max_wvalue, ext_ty, .lt);
+ try cg.addTag(.select);
+ try cg.addLocal(.local_set, bin_result.local.value); // re-use local
+
+ try cg.emitWValue(bin_result);
+ try cg.emitWValue(min_wvalue);
+ _ = try cg.cmp(bin_result, min_wvalue, ext_ty, .gt);
+ try cg.addTag(.select);
+ try cg.addLocal(.local_set, bin_result.local.value); // re-use local
+ return (try cg.wrapOperand(bin_result, ty)).toLocal(cg, ty);
} else {
const zero: WValue = switch (wasm_bits) {
32 => .{ .imm32 = 0 },
64 => .{ .imm64 = 0 },
else => unreachable,
};
- try func.emitWValue(max_wvalue);
- try func.emitWValue(min_wvalue);
- _ = try func.cmp(bin_result, zero, ty, .lt);
- try func.addTag(.select);
- try func.emitWValue(bin_result);
+ try cg.emitWValue(max_wvalue);
+ try cg.emitWValue(min_wvalue);
+ _ = try cg.cmp(bin_result, zero, ty, .lt);
+ try cg.addTag(.select);
+ try cg.emitWValue(bin_result);
// leave on stack
- const cmp_zero_result = try func.cmp(rhs, zero, ty, if (op == .add) .lt else .gt);
- const cmp_bin_result = try func.cmp(bin_result, lhs, ty, .lt);
- _ = try func.binOp(cmp_zero_result, cmp_bin_result, Type.u32, .xor); // comparisons always return i32, so provide u32 as type to xor.
- try func.addTag(.select);
- try func.addLabel(.local_set, bin_result.local.value); // re-use local
+ const cmp_zero_result = try cg.cmp(rhs, zero, ty, if (op == .add) .lt else .gt);
+ const cmp_bin_result = try cg.cmp(bin_result, lhs, ty, .lt);
+ _ = try cg.binOp(cmp_zero_result, cmp_bin_result, Type.u32, .xor); // comparisons always return i32, so provide u32 as type to xor.
+ try cg.addTag(.select);
+ try cg.addLocal(.local_set, bin_result.local.value); // re-use local
return bin_result;
}
}
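// Illustrative note on the full-width signed path above: overflow is
// detected as `(rhs < 0) != (result < lhs)` (for add), and the saturation
// bound is chosen from the sign of the wrapped result. For example, adding
// 2_000_000_000 + 2_000_000_000 as i32 wraps to a negative value, the two
// comparisons disagree, and maxInt(i32) is selected.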
-fn airShlSat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+fn airShlSat(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
- const pt = func.pt;
+ const pt = cg.pt;
const zcu = pt.zcu;
- const ty = func.typeOfIndex(inst);
+ const ty = cg.typeOfIndex(inst);
const int_info = ty.intInfo(zcu);
const is_signed = int_info.signedness == .signed;
if (int_info.bits > 64) {
- return func.fail("TODO: Saturating shifting left for integers with bitsize '{d}'", .{int_info.bits});
+ return cg.fail("TODO: Saturating shifting left for integers with bitsize '{d}'", .{int_info.bits});
}
- const lhs = try func.resolveInst(bin_op.lhs);
- const rhs = try func.resolveInst(bin_op.rhs);
+ const lhs = try cg.resolveInst(bin_op.lhs);
+ const rhs = try cg.resolveInst(bin_op.rhs);
const wasm_bits = toWasmBits(int_info.bits).?;
- const result = try func.allocLocal(ty);
+ const result = try cg.allocLocal(ty);
if (wasm_bits == int_info.bits) {
- var shl = try (try func.binOp(lhs, rhs, ty, .shl)).toLocal(func, ty);
- defer shl.free(func);
- var shr = try (try func.binOp(shl, rhs, ty, .shr)).toLocal(func, ty);
- defer shr.free(func);
+ var shl = try (try cg.binOp(lhs, rhs, ty, .shl)).toLocal(cg, ty);
+ defer shl.free(cg);
+ var shr = try (try cg.binOp(shl, rhs, ty, .shr)).toLocal(cg, ty);
+ defer shr.free(cg);
switch (wasm_bits) {
32 => blk: {
if (!is_signed) {
- try func.addImm32(std.math.maxInt(u32));
+ try cg.addImm32(std.math.maxInt(u32));
break :blk;
}
- try func.addImm32(@bitCast(@as(i32, std.math.minInt(i32))));
- try func.addImm32(@bitCast(@as(i32, std.math.maxInt(i32))));
- _ = try func.cmp(lhs, .{ .imm32 = 0 }, ty, .lt);
- try func.addTag(.select);
+ try cg.addImm32(@bitCast(@as(i32, std.math.minInt(i32))));
+ try cg.addImm32(@bitCast(@as(i32, std.math.maxInt(i32))));
+ _ = try cg.cmp(lhs, .{ .imm32 = 0 }, ty, .lt);
+ try cg.addTag(.select);
},
64 => blk: {
if (!is_signed) {
- try func.addImm64(std.math.maxInt(u64));
+ try cg.addImm64(std.math.maxInt(u64));
break :blk;
}
- try func.addImm64(@bitCast(@as(i64, std.math.minInt(i64))));
- try func.addImm64(@bitCast(@as(i64, std.math.maxInt(i64))));
- _ = try func.cmp(lhs, .{ .imm64 = 0 }, ty, .lt);
- try func.addTag(.select);
+ try cg.addImm64(@bitCast(@as(i64, std.math.minInt(i64))));
+ try cg.addImm64(@bitCast(@as(i64, std.math.maxInt(i64))));
+ _ = try cg.cmp(lhs, .{ .imm64 = 0 }, ty, .lt);
+ try cg.addTag(.select);
},
else => unreachable,
}
- try func.emitWValue(shl);
- _ = try func.cmp(lhs, shr, ty, .neq);
- try func.addTag(.select);
- try func.addLabel(.local_set, result.local.value);
+ try cg.emitWValue(shl);
+ _ = try cg.cmp(lhs, shr, ty, .neq);
+ try cg.addTag(.select);
+ try cg.addLocal(.local_set, result.local.value);
} else {
const shift_size = wasm_bits - int_info.bits;
const shift_value: WValue = switch (wasm_bits) {
@@ -7102,50 +6956,50 @@ fn airShlSat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
};
const ext_ty = try pt.intType(int_info.signedness, wasm_bits);
- var shl_res = try (try func.binOp(lhs, shift_value, ext_ty, .shl)).toLocal(func, ext_ty);
- defer shl_res.free(func);
- var shl = try (try func.binOp(shl_res, rhs, ext_ty, .shl)).toLocal(func, ext_ty);
- defer shl.free(func);
- var shr = try (try func.binOp(shl, rhs, ext_ty, .shr)).toLocal(func, ext_ty);
- defer shr.free(func);
+ var shl_res = try (try cg.binOp(lhs, shift_value, ext_ty, .shl)).toLocal(cg, ext_ty);
+ defer shl_res.free(cg);
+ var shl = try (try cg.binOp(shl_res, rhs, ext_ty, .shl)).toLocal(cg, ext_ty);
+ defer shl.free(cg);
+ var shr = try (try cg.binOp(shl, rhs, ext_ty, .shr)).toLocal(cg, ext_ty);
+ defer shr.free(cg);
switch (wasm_bits) {
32 => blk: {
if (!is_signed) {
- try func.addImm32(std.math.maxInt(u32));
+ try cg.addImm32(std.math.maxInt(u32));
break :blk;
}
- try func.addImm32(@bitCast(@as(i32, std.math.minInt(i32))));
- try func.addImm32(@bitCast(@as(i32, std.math.maxInt(i32))));
- _ = try func.cmp(shl_res, .{ .imm32 = 0 }, ext_ty, .lt);
- try func.addTag(.select);
+ try cg.addImm32(@bitCast(@as(i32, std.math.minInt(i32))));
+ try cg.addImm32(@bitCast(@as(i32, std.math.maxInt(i32))));
+ _ = try cg.cmp(shl_res, .{ .imm32 = 0 }, ext_ty, .lt);
+ try cg.addTag(.select);
},
64 => blk: {
if (!is_signed) {
- try func.addImm64(std.math.maxInt(u64));
+ try cg.addImm64(std.math.maxInt(u64));
break :blk;
}
- try func.addImm64(@bitCast(@as(i64, std.math.minInt(i64))));
- try func.addImm64(@bitCast(@as(i64, std.math.maxInt(i64))));
- _ = try func.cmp(shl_res, .{ .imm64 = 0 }, ext_ty, .lt);
- try func.addTag(.select);
+ try cg.addImm64(@bitCast(@as(i64, std.math.minInt(i64))));
+ try cg.addImm64(@bitCast(@as(i64, std.math.maxInt(i64))));
+ _ = try cg.cmp(shl_res, .{ .imm64 = 0 }, ext_ty, .lt);
+ try cg.addTag(.select);
},
else => unreachable,
}
- try func.emitWValue(shl);
- _ = try func.cmp(shl_res, shr, ext_ty, .neq);
- try func.addTag(.select);
- try func.addLabel(.local_set, result.local.value);
- var shift_result = try func.binOp(result, shift_value, ext_ty, .shr);
+ try cg.emitWValue(shl);
+ _ = try cg.cmp(shl_res, shr, ext_ty, .neq);
+ try cg.addTag(.select);
+ try cg.addLocal(.local_set, result.local.value);
+ var shift_result = try cg.binOp(result, shift_value, ext_ty, .shr);
if (is_signed) {
- shift_result = try func.wrapOperand(shift_result, ty);
+ shift_result = try cg.wrapOperand(shift_result, ty);
}
- try func.addLabel(.local_set, result.local.value);
+ try cg.addLocal(.local_set, result.local.value);
}
- return func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
+ return cg.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
}
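// Illustrative note on the lowering above: the value is shifted left and
// then shifted back right; if the round trip no longer equals the original
// operand, bits were lost, so the result saturates (maxInt for unsigned,
// minInt/maxInt chosen by the sign of lhs for signed). For example,
// @as(u32, 0x8000_0001) <<| 1 wraps to 2, the shift back gives 1, which
// differs from the original, so maxInt(u32) is selected.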
/// Calls a compiler-rt intrinsic by creating an undefined symbol,
@@ -7155,31 +7009,23 @@ fn airShlSat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
/// passed as the first parameter.
/// May leave the return value on the stack.
fn callIntrinsic(
- func: *CodeGen,
- name: []const u8,
+ cg: *CodeGen,
+ intrinsic: Mir.Intrinsic,
param_types: []const InternPool.Index,
return_type: Type,
args: []const WValue,
) InnerError!WValue {
assert(param_types.len == args.len);
- const symbol_index = func.bin_file.getGlobalSymbol(name, null) catch |err| {
- return func.fail("Could not find or create global symbol '{s}'", .{@errorName(err)});
- };
+ const zcu = cg.pt.zcu;
// Always pass over C-ABI
- const pt = func.pt;
- const zcu = pt.zcu;
- var func_type = try genFunctype(func.gpa, .{ .wasm_watc = .{} }, param_types, return_type, pt, func.target.*);
- defer func_type.deinit(func.gpa);
- const func_type_index = try func.bin_file.zig_object.?.putOrGetFuncType(func.gpa, func_type);
- try func.bin_file.addOrUpdateImport(name, symbol_index, null, func_type_index);
- const want_sret_param = firstParamSRet(.{ .wasm_watc = .{} }, return_type, pt, func.target.*);
+ const want_sret_param = firstParamSRet(.{ .wasm_watc = .{} }, return_type, zcu, cg.target);
    // If the return value is passed via an sret pointer, allocate stack space
    // for it and emit that pointer as the first argument.
const sret = if (want_sret_param) blk: {
- const sret_local = try func.allocStack(return_type);
- try func.lowerToStack(sret_local);
+ const sret_local = try cg.allocStack(return_type);
+ try cg.lowerToStack(sret_local);
break :blk sret_local;
} else .none;
@@ -7187,16 +7033,15 @@ fn callIntrinsic(
for (args, 0..) |arg, arg_i| {
assert(!(want_sret_param and arg == .stack));
assert(Type.fromInterned(param_types[arg_i]).hasRuntimeBitsIgnoreComptime(zcu));
- try func.lowerArg(.{ .wasm_watc = .{} }, Type.fromInterned(param_types[arg_i]), arg);
+ try cg.lowerArg(.{ .wasm_watc = .{} }, Type.fromInterned(param_types[arg_i]), arg);
}
- // Actually call our intrinsic
- try func.addLabel(.call, @intFromEnum(symbol_index));
+ try cg.addInst(.{ .tag = .call_intrinsic, .data = .{ .intrinsic = intrinsic } });
if (!return_type.hasRuntimeBitsIgnoreComptime(zcu)) {
return .none;
} else if (return_type.isNoReturn(zcu)) {
- try func.addTag(.@"unreachable");
+ try cg.addTag(.@"unreachable");
return .none;
} else if (want_sret_param) {
return sret;
@@ -7205,194 +7050,30 @@ fn callIntrinsic(
}
}
-fn airTagName(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
- const operand = try func.resolveInst(un_op);
- const enum_ty = func.typeOf(un_op);
-
- const func_sym_index = try func.getTagNameFunction(enum_ty);
+fn airTagName(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const un_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
+ const operand = try cg.resolveInst(un_op);
+ const enum_ty = cg.typeOf(un_op);
- const result_ptr = try func.allocStack(func.typeOfIndex(inst));
- try func.lowerToStack(result_ptr);
- try func.emitWValue(operand);
- try func.addLabel(.call, func_sym_index);
+ const result_ptr = try cg.allocStack(cg.typeOfIndex(inst));
+ try cg.lowerToStack(result_ptr);
+ try cg.emitWValue(operand);
+ try cg.addInst(.{ .tag = .call_tag_name, .data = .{ .ip_index = enum_ty.toIntern() } });
- return func.finishAir(inst, result_ptr, &.{un_op});
+ return cg.finishAir(inst, result_ptr, &.{un_op});
}
-fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
- const pt = func.pt;
- const zcu = pt.zcu;
+fn airErrorSetHasValue(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const zcu = cg.pt.zcu;
const ip = &zcu.intern_pool;
+ const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
- var arena_allocator = std.heap.ArenaAllocator.init(func.gpa);
- defer arena_allocator.deinit();
- const arena = arena_allocator.allocator();
-
- const func_name = try std.fmt.allocPrintZ(arena, "__zig_tag_name_{}", .{ip.loadEnumType(enum_ty.toIntern()).name.fmt(ip)});
-
- // check if we already generated code for this.
- if (func.bin_file.findGlobalSymbol(func_name)) |loc| {
- return @intFromEnum(loc.index);
- }
-
- const int_tag_ty = enum_ty.intTagType(zcu);
-
- if (int_tag_ty.bitSize(zcu) > 64) {
- return func.fail("TODO: Implement @tagName for enums with tag size larger than 64 bits", .{});
- }
-
- var relocs = std.ArrayList(link.File.Wasm.Relocation).init(func.gpa);
- defer relocs.deinit();
-
- var body_list = std.ArrayList(u8).init(func.gpa);
- defer body_list.deinit();
- var writer = body_list.writer();
-
- // The locals of the function body (always 0)
- try leb.writeUleb128(writer, @as(u32, 0));
-
- // outer block
- try writer.writeByte(std.wasm.opcode(.block));
- try writer.writeByte(std.wasm.block_empty);
-
- // TODO: Make switch implementation generic so we can use a jump table for this when the tags are not sparse.
- // generate an if-else chain for each tag value as well as constant.
- const tag_names = enum_ty.enumFields(zcu);
- for (0..tag_names.len) |tag_index| {
- const tag_name = tag_names.get(ip)[tag_index];
- const tag_name_len = tag_name.length(ip);
- // for each tag name, create an unnamed const,
- // and then get a pointer to its value.
- const name_ty = try pt.arrayType(.{
- .len = tag_name_len,
- .child = .u8_type,
- .sentinel = .zero_u8,
- });
- const name_val = try pt.intern(.{ .aggregate = .{
- .ty = name_ty.toIntern(),
- .storage = .{ .bytes = tag_name.toString() },
- } });
- const tag_sym_index = switch (try func.bin_file.lowerUav(pt, name_val, .none, func.src_loc)) {
- .mcv => |mcv| mcv.load_symbol,
- .fail => |err_msg| {
- func.err_msg = err_msg;
- return error.CodegenFail;
- },
- };
-
- // block for this if case
- try writer.writeByte(std.wasm.opcode(.block));
- try writer.writeByte(std.wasm.block_empty);
-
- // get actual tag value (stored in 2nd parameter);
- try writer.writeByte(std.wasm.opcode(.local_get));
- try leb.writeUleb128(writer, @as(u32, 1));
-
- const tag_val = try pt.enumValueFieldIndex(enum_ty, @intCast(tag_index));
- const tag_value = try func.lowerConstant(tag_val, enum_ty);
-
- switch (tag_value) {
- .imm32 => |value| {
- try writer.writeByte(std.wasm.opcode(.i32_const));
- try leb.writeIleb128(writer, @as(i32, @bitCast(value)));
- try writer.writeByte(std.wasm.opcode(.i32_ne));
- },
- .imm64 => |value| {
- try writer.writeByte(std.wasm.opcode(.i64_const));
- try leb.writeIleb128(writer, @as(i64, @bitCast(value)));
- try writer.writeByte(std.wasm.opcode(.i64_ne));
- },
- else => unreachable,
- }
- // if they're not equal, break out of current branch
- try writer.writeByte(std.wasm.opcode(.br_if));
- try leb.writeUleb128(writer, @as(u32, 0));
-
- // store the address of the tagname in the pointer field of the slice
- // get the address twice so we can also store the length.
- try writer.writeByte(std.wasm.opcode(.local_get));
- try leb.writeUleb128(writer, @as(u32, 0));
- try writer.writeByte(std.wasm.opcode(.local_get));
- try leb.writeUleb128(writer, @as(u32, 0));
-
- // get address of tagname and emit a relocation to it
- if (func.arch() == .wasm32) {
- const encoded_alignment = @ctz(@as(u32, 4));
- try writer.writeByte(std.wasm.opcode(.i32_const));
- try relocs.append(.{
- .relocation_type = .R_WASM_MEMORY_ADDR_LEB,
- .offset = @as(u32, @intCast(body_list.items.len)),
- .index = tag_sym_index,
- });
- try writer.writeAll(&[_]u8{0} ** 5); // will be relocated
-
- // store pointer
- try writer.writeByte(std.wasm.opcode(.i32_store));
- try leb.writeUleb128(writer, encoded_alignment);
- try leb.writeUleb128(writer, @as(u32, 0));
-
- // store length
- try writer.writeByte(std.wasm.opcode(.i32_const));
- try leb.writeUleb128(writer, @as(u32, @intCast(tag_name_len)));
- try writer.writeByte(std.wasm.opcode(.i32_store));
- try leb.writeUleb128(writer, encoded_alignment);
- try leb.writeUleb128(writer, @as(u32, 4));
- } else {
- const encoded_alignment = @ctz(@as(u32, 8));
- try writer.writeByte(std.wasm.opcode(.i64_const));
- try relocs.append(.{
- .relocation_type = .R_WASM_MEMORY_ADDR_LEB64,
- .offset = @as(u32, @intCast(body_list.items.len)),
- .index = tag_sym_index,
- });
- try writer.writeAll(&[_]u8{0} ** 10); // will be relocated
-
- // store pointer
- try writer.writeByte(std.wasm.opcode(.i64_store));
- try leb.writeUleb128(writer, encoded_alignment);
- try leb.writeUleb128(writer, @as(u32, 0));
-
- // store length
- try writer.writeByte(std.wasm.opcode(.i64_const));
- try leb.writeUleb128(writer, @as(u64, @intCast(tag_name_len)));
- try writer.writeByte(std.wasm.opcode(.i64_store));
- try leb.writeUleb128(writer, encoded_alignment);
- try leb.writeUleb128(writer, @as(u32, 8));
- }
-
- // break outside blocks
- try writer.writeByte(std.wasm.opcode(.br));
- try leb.writeUleb128(writer, @as(u32, 1));
-
- // end the block for this case
- try writer.writeByte(std.wasm.opcode(.end));
- }
-
- try writer.writeByte(std.wasm.opcode(.@"unreachable")); // tag value does not have a name
- // finish outer block
- try writer.writeByte(std.wasm.opcode(.end));
- // finish function body
- try writer.writeByte(std.wasm.opcode(.end));
-
- const slice_ty = Type.slice_const_u8_sentinel_0;
- const func_type = try genFunctype(arena, .Unspecified, &.{int_tag_ty.ip_index}, slice_ty, pt, func.target.*);
- const sym_index = try func.bin_file.createFunction(func_name, func_type, &body_list, &relocs);
- return @intFromEnum(sym_index);
-}
-
-fn airErrorSetHasValue(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const pt = func.pt;
- const zcu = pt.zcu;
- const ip = &zcu.intern_pool;
- const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
-
- const operand = try func.resolveInst(ty_op.operand);
+ const operand = try cg.resolveInst(ty_op.operand);
const error_set_ty = ty_op.ty.toType();
- const result = try func.allocLocal(Type.bool);
+ const result = try cg.allocLocal(Type.bool);
const names = error_set_ty.errorSetNames(zcu);
- var values = try std.ArrayList(u32).initCapacity(func.gpa, names.len);
+ var values = try std.ArrayList(u32).initCapacity(cg.gpa, names.len);
defer values.deinit();
var lowest: ?u32 = null;
@@ -7418,23 +7099,23 @@ fn airErrorSetHasValue(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
// start block for 'true' branch
- try func.startBlock(.block, wasm.block_empty);
+ try cg.startBlock(.block, .empty);
// start block for 'false' branch
- try func.startBlock(.block, wasm.block_empty);
+ try cg.startBlock(.block, .empty);
// block for the jump table itself
- try func.startBlock(.block, wasm.block_empty);
+ try cg.startBlock(.block, .empty);
// lower operand to determine jump table target
- try func.emitWValue(operand);
- try func.addImm32(lowest.?);
- try func.addTag(.i32_sub);
+ try cg.emitWValue(operand);
+ try cg.addImm32(lowest.?);
+ try cg.addTag(.i32_sub);
// Account for default branch so always add '1'
const depth = @as(u32, @intCast(highest.? - lowest.? + 1));
const jump_table: Mir.JumpTable = .{ .length = depth };
- const table_extra_index = try func.addExtra(jump_table);
- try func.addInst(.{ .tag = .br_table, .data = .{ .payload = table_extra_index } });
- try func.mir_extra.ensureUnusedCapacity(func.gpa, depth);
+ const table_extra_index = try cg.addExtra(jump_table);
+ try cg.addInst(.{ .tag = .br_table, .data = .{ .payload = table_extra_index } });
+ try cg.mir_extra.ensureUnusedCapacity(cg.gpa, depth);
var value: u32 = lowest.?;
while (value <= highest.?) : (value += 1) {
@@ -7444,202 +7125,200 @@ fn airErrorSetHasValue(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
break :blk 0;
};
- func.mir_extra.appendAssumeCapacity(idx);
+ cg.mir_extra.appendAssumeCapacity(idx);
}
- try func.endBlock();
+ try cg.endBlock();
    // 'false' branch (i.e. the error set does not have the value)
// ensure we set local to 0 in case the local was re-used.
- try func.addImm32(0);
- try func.addLabel(.local_set, result.local.value);
- try func.addLabel(.br, 1);
- try func.endBlock();
+ try cg.addImm32(0);
+ try cg.addLocal(.local_set, result.local.value);
+ try cg.addLabel(.br, 1);
+ try cg.endBlock();
// 'true' branch
- try func.addImm32(1);
- try func.addLabel(.local_set, result.local.value);
- try func.addLabel(.br, 0);
- try func.endBlock();
+ try cg.addImm32(1);
+ try cg.addLocal(.local_set, result.local.value);
+ try cg.addLabel(.br, 0);
+ try cg.endBlock();
- return func.finishAir(inst, result, &.{ty_op.operand});
+ return cg.finishAir(inst, result, &.{ty_op.operand});
}
-inline fn useAtomicFeature(func: *const CodeGen) bool {
- return std.Target.wasm.featureSetHas(func.target.cpu.features, .atomics);
+inline fn useAtomicFeature(cg: *const CodeGen) bool {
+ return std.Target.wasm.featureSetHas(cg.target.cpu.features, .atomics);
}
-fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const pt = func.pt;
- const zcu = pt.zcu;
- const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
- const extra = func.air.extraData(Air.Cmpxchg, ty_pl.payload).data;
+fn airCmpxchg(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const zcu = cg.pt.zcu;
+ const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
+ const extra = cg.air.extraData(Air.Cmpxchg, ty_pl.payload).data;
- const ptr_ty = func.typeOf(extra.ptr);
+ const ptr_ty = cg.typeOf(extra.ptr);
const ty = ptr_ty.childType(zcu);
- const result_ty = func.typeOfIndex(inst);
+ const result_ty = cg.typeOfIndex(inst);
- const ptr_operand = try func.resolveInst(extra.ptr);
- const expected_val = try func.resolveInst(extra.expected_value);
- const new_val = try func.resolveInst(extra.new_value);
+ const ptr_operand = try cg.resolveInst(extra.ptr);
+ const expected_val = try cg.resolveInst(extra.expected_value);
+ const new_val = try cg.resolveInst(extra.new_value);
- const cmp_result = try func.allocLocal(Type.bool);
+ const cmp_result = try cg.allocLocal(Type.bool);
- const ptr_val = if (func.useAtomicFeature()) val: {
- const val_local = try func.allocLocal(ty);
- try func.emitWValue(ptr_operand);
- try func.lowerToStack(expected_val);
- try func.lowerToStack(new_val);
- try func.addAtomicMemArg(switch (ty.abiSize(zcu)) {
+ const ptr_val = if (cg.useAtomicFeature()) val: {
+ const val_local = try cg.allocLocal(ty);
+ try cg.emitWValue(ptr_operand);
+ try cg.lowerToStack(expected_val);
+ try cg.lowerToStack(new_val);
+ try cg.addAtomicMemArg(switch (ty.abiSize(zcu)) {
1 => .i32_atomic_rmw8_cmpxchg_u,
2 => .i32_atomic_rmw16_cmpxchg_u,
4 => .i32_atomic_rmw_cmpxchg,
8 => .i32_atomic_rmw_cmpxchg,
- else => |size| return func.fail("TODO: implement `@cmpxchg` for types with abi size '{d}'", .{size}),
+ else => |size| return cg.fail("TODO: implement `@cmpxchg` for types with abi size '{d}'", .{size}),
}, .{
.offset = ptr_operand.offset(),
.alignment = @intCast(ty.abiAlignment(zcu).toByteUnits().?),
});
- try func.addLabel(.local_tee, val_local.local.value);
- _ = try func.cmp(.stack, expected_val, ty, .eq);
- try func.addLabel(.local_set, cmp_result.local.value);
+ try cg.addLocal(.local_tee, val_local.local.value);
+ _ = try cg.cmp(.stack, expected_val, ty, .eq);
+ try cg.addLocal(.local_set, cmp_result.local.value);
break :val val_local;
} else val: {
if (ty.abiSize(zcu) > 8) {
- return func.fail("TODO: Implement `@cmpxchg` for types larger than abi size of 8 bytes", .{});
+ return cg.fail("TODO: Implement `@cmpxchg` for types larger than abi size of 8 bytes", .{});
}
- const ptr_val = try WValue.toLocal(try func.load(ptr_operand, ty, 0), func, ty);
+ const ptr_val = try WValue.toLocal(try cg.load(ptr_operand, ty, 0), cg, ty);
- try func.lowerToStack(ptr_operand);
- try func.lowerToStack(new_val);
- try func.emitWValue(ptr_val);
- _ = try func.cmp(ptr_val, expected_val, ty, .eq);
- try func.addLabel(.local_tee, cmp_result.local.value);
- try func.addTag(.select);
- try func.store(.stack, .stack, ty, 0);
+ try cg.lowerToStack(ptr_operand);
+ try cg.lowerToStack(new_val);
+ try cg.emitWValue(ptr_val);
+ _ = try cg.cmp(ptr_val, expected_val, ty, .eq);
+ try cg.addLocal(.local_tee, cmp_result.local.value);
+ try cg.addTag(.select);
+ try cg.store(.stack, .stack, ty, 0);
break :val ptr_val;
};
- const result = if (isByRef(result_ty, pt, func.target.*)) val: {
- try func.emitWValue(cmp_result);
- try func.addImm32(~@as(u32, 0));
- try func.addTag(.i32_xor);
- try func.addImm32(1);
- try func.addTag(.i32_and);
- const and_result = try WValue.toLocal(.stack, func, Type.bool);
- const result_ptr = try func.allocStack(result_ty);
- try func.store(result_ptr, and_result, Type.bool, @as(u32, @intCast(ty.abiSize(zcu))));
- try func.store(result_ptr, ptr_val, ty, 0);
+ const result = if (isByRef(result_ty, zcu, cg.target)) val: {
+ try cg.emitWValue(cmp_result);
+ try cg.addImm32(~@as(u32, 0));
+ try cg.addTag(.i32_xor);
+ try cg.addImm32(1);
+ try cg.addTag(.i32_and);
+ const and_result = try WValue.toLocal(.stack, cg, Type.bool);
+ const result_ptr = try cg.allocStack(result_ty);
+ try cg.store(result_ptr, and_result, Type.bool, @as(u32, @intCast(ty.abiSize(zcu))));
+ try cg.store(result_ptr, ptr_val, ty, 0);
break :val result_ptr;
} else val: {
- try func.addImm32(0);
- try func.emitWValue(ptr_val);
- try func.emitWValue(cmp_result);
- try func.addTag(.select);
+ try cg.addImm32(0);
+ try cg.emitWValue(ptr_val);
+ try cg.emitWValue(cmp_result);
+ try cg.addTag(.select);
break :val .stack;
};
- return func.finishAir(inst, result, &.{ extra.ptr, extra.expected_value, extra.new_value });
+ return cg.finishAir(inst, result, &.{ extra.ptr, extra.expected_value, extra.new_value });
}
-fn airAtomicLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const pt = func.pt;
- const atomic_load = func.air.instructions.items(.data)[@intFromEnum(inst)].atomic_load;
- const ptr = try func.resolveInst(atomic_load.ptr);
- const ty = func.typeOfIndex(inst);
+fn airAtomicLoad(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const zcu = cg.pt.zcu;
+ const atomic_load = cg.air.instructions.items(.data)[@intFromEnum(inst)].atomic_load;
+ const ptr = try cg.resolveInst(atomic_load.ptr);
+ const ty = cg.typeOfIndex(inst);
- if (func.useAtomicFeature()) {
- const tag: wasm.AtomicsOpcode = switch (ty.abiSize(pt.zcu)) {
+ if (cg.useAtomicFeature()) {
+ const tag: std.wasm.AtomicsOpcode = switch (ty.abiSize(zcu)) {
1 => .i32_atomic_load8_u,
2 => .i32_atomic_load16_u,
4 => .i32_atomic_load,
8 => .i64_atomic_load,
- else => |size| return func.fail("TODO: @atomicLoad for types with abi size {d}", .{size}),
+ else => |size| return cg.fail("TODO: @atomicLoad for types with abi size {d}", .{size}),
};
- try func.emitWValue(ptr);
- try func.addAtomicMemArg(tag, .{
+ try cg.emitWValue(ptr);
+ try cg.addAtomicMemArg(tag, .{
.offset = ptr.offset(),
- .alignment = @intCast(ty.abiAlignment(pt.zcu).toByteUnits().?),
+ .alignment = @intCast(ty.abiAlignment(zcu).toByteUnits().?),
});
} else {
- _ = try func.load(ptr, ty, 0);
+ _ = try cg.load(ptr, ty, 0);
}
- return func.finishAir(inst, .stack, &.{atomic_load.ptr});
+ return cg.finishAir(inst, .stack, &.{atomic_load.ptr});
}
-fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const pt = func.pt;
- const zcu = pt.zcu;
- const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
- const extra = func.air.extraData(Air.AtomicRmw, pl_op.payload).data;
+fn airAtomicRmw(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const zcu = cg.pt.zcu;
+ const pl_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
+ const extra = cg.air.extraData(Air.AtomicRmw, pl_op.payload).data;
- const ptr = try func.resolveInst(pl_op.operand);
- const operand = try func.resolveInst(extra.operand);
- const ty = func.typeOfIndex(inst);
+ const ptr = try cg.resolveInst(pl_op.operand);
+ const operand = try cg.resolveInst(extra.operand);
+ const ty = cg.typeOfIndex(inst);
const op: std.builtin.AtomicRmwOp = extra.op();
- if (func.useAtomicFeature()) {
+ if (cg.useAtomicFeature()) {
switch (op) {
.Max,
.Min,
.Nand,
=> {
- const tmp = try func.load(ptr, ty, 0);
- const value = try tmp.toLocal(func, ty);
+ const tmp = try cg.load(ptr, ty, 0);
+ const value = try tmp.toLocal(cg, ty);
// create a loop to cmpxchg the new value
- try func.startBlock(.loop, wasm.block_empty);
+ try cg.startBlock(.loop, .empty);
- try func.emitWValue(ptr);
- try func.emitWValue(value);
+ try cg.emitWValue(ptr);
+ try cg.emitWValue(value);
if (op == .Nand) {
const wasm_bits = toWasmBits(@intCast(ty.bitSize(zcu))).?;
- const and_res = try func.binOp(value, operand, ty, .@"and");
+ const and_res = try cg.binOp(value, operand, ty, .@"and");
if (wasm_bits == 32)
- try func.addImm32(~@as(u32, 0))
+ try cg.addImm32(~@as(u32, 0))
else if (wasm_bits == 64)
- try func.addImm64(~@as(u64, 0))
+ try cg.addImm64(~@as(u64, 0))
else
- return func.fail("TODO: `@atomicRmw` with operator `Nand` for types larger than 64 bits", .{});
- _ = try func.binOp(and_res, .stack, ty, .xor);
+ return cg.fail("TODO: `@atomicRmw` with operator `Nand` for types larger than 64 bits", .{});
+ _ = try cg.binOp(and_res, .stack, ty, .xor);
} else {
- try func.emitWValue(value);
- try func.emitWValue(operand);
- _ = try func.cmp(value, operand, ty, if (op == .Max) .gt else .lt);
- try func.addTag(.select);
+ try cg.emitWValue(value);
+ try cg.emitWValue(operand);
+ _ = try cg.cmp(value, operand, ty, if (op == .Max) .gt else .lt);
+ try cg.addTag(.select);
}
- try func.addAtomicMemArg(
+ try cg.addAtomicMemArg(
switch (ty.abiSize(zcu)) {
1 => .i32_atomic_rmw8_cmpxchg_u,
2 => .i32_atomic_rmw16_cmpxchg_u,
4 => .i32_atomic_rmw_cmpxchg,
8 => .i64_atomic_rmw_cmpxchg,
- else => return func.fail("TODO: implement `@atomicRmw` with operation `{s}` for types larger than 64 bits", .{@tagName(op)}),
+ else => return cg.fail("TODO: implement `@atomicRmw` with operation `{s}` for types larger than 64 bits", .{@tagName(op)}),
},
.{
.offset = ptr.offset(),
.alignment = @intCast(ty.abiAlignment(zcu).toByteUnits().?),
},
);
- const select_res = try func.allocLocal(ty);
- try func.addLabel(.local_tee, select_res.local.value);
- _ = try func.cmp(.stack, value, ty, .neq); // leave on stack so we can use it for br_if
+ const select_res = try cg.allocLocal(ty);
+ try cg.addLocal(.local_tee, select_res.local.value);
+ _ = try cg.cmp(.stack, value, ty, .neq); // leave on stack so we can use it for br_if
- try func.emitWValue(select_res);
- try func.addLabel(.local_set, value.local.value);
+ try cg.emitWValue(select_res);
+ try cg.addLocal(.local_set, value.local.value);
- try func.addLabel(.br_if, 0);
- try func.endBlock();
- return func.finishAir(inst, value, &.{ pl_op.operand, extra.operand });
+ try cg.addLabel(.br_if, 0);
+ try cg.endBlock();
+ return cg.finishAir(inst, value, &.{ pl_op.operand, extra.operand });
},
// the other operations have their own instructions for Wasm.
else => {
- try func.emitWValue(ptr);
- try func.emitWValue(operand);
- const tag: wasm.AtomicsOpcode = switch (ty.abiSize(zcu)) {
+ try cg.emitWValue(ptr);
+ try cg.emitWValue(operand);
+ const tag: std.wasm.AtomicsOpcode = switch (ty.abiSize(zcu)) {
1 => switch (op) {
.Xchg => .i32_atomic_rmw8_xchg_u,
.Add => .i32_atomic_rmw8_add_u,
@@ -7676,22 +7355,22 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
.Xor => .i64_atomic_rmw_xor,
else => unreachable,
},
- else => |size| return func.fail("TODO: Implement `@atomicRmw` for types with abi size {d}", .{size}),
+ else => |size| return cg.fail("TODO: Implement `@atomicRmw` for types with abi size {d}", .{size}),
};
- try func.addAtomicMemArg(tag, .{
+ try cg.addAtomicMemArg(tag, .{
.offset = ptr.offset(),
.alignment = @intCast(ty.abiAlignment(zcu).toByteUnits().?),
});
- return func.finishAir(inst, .stack, &.{ pl_op.operand, extra.operand });
+ return cg.finishAir(inst, .stack, &.{ pl_op.operand, extra.operand });
},
}
} else {
- const loaded = try func.load(ptr, ty, 0);
- const result = try loaded.toLocal(func, ty);
+ const loaded = try cg.load(ptr, ty, 0);
+ const result = try loaded.toLocal(cg, ty);
switch (op) {
.Xchg => {
- try func.store(ptr, operand, ty, 0);
+ try cg.store(ptr, operand, ty, 0);
},
.Add,
.Sub,
@@ -7699,8 +7378,8 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
.Or,
.Xor,
=> {
- try func.emitWValue(ptr);
- _ = try func.binOp(result, operand, ty, switch (op) {
+ try cg.emitWValue(ptr);
+ _ = try cg.binOp(result, operand, ty, switch (op) {
.Add => .add,
.Sub => .sub,
.And => .@"and",
@@ -7709,87 +7388,123 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
else => unreachable,
});
if (ty.isInt(zcu) and (op == .Add or op == .Sub)) {
- _ = try func.wrapOperand(.stack, ty);
+ _ = try cg.wrapOperand(.stack, ty);
}
- try func.store(.stack, .stack, ty, ptr.offset());
+ try cg.store(.stack, .stack, ty, ptr.offset());
},
.Max,
.Min,
=> {
- try func.emitWValue(ptr);
- try func.emitWValue(result);
- try func.emitWValue(operand);
- _ = try func.cmp(result, operand, ty, if (op == .Max) .gt else .lt);
- try func.addTag(.select);
- try func.store(.stack, .stack, ty, ptr.offset());
+ try cg.emitWValue(ptr);
+ try cg.emitWValue(result);
+ try cg.emitWValue(operand);
+ _ = try cg.cmp(result, operand, ty, if (op == .Max) .gt else .lt);
+ try cg.addTag(.select);
+ try cg.store(.stack, .stack, ty, ptr.offset());
},
.Nand => {
const wasm_bits = toWasmBits(@intCast(ty.bitSize(zcu))).?;
- try func.emitWValue(ptr);
- const and_res = try func.binOp(result, operand, ty, .@"and");
+ try cg.emitWValue(ptr);
+ const and_res = try cg.binOp(result, operand, ty, .@"and");
if (wasm_bits == 32)
- try func.addImm32(~@as(u32, 0))
+ try cg.addImm32(~@as(u32, 0))
else if (wasm_bits == 64)
- try func.addImm64(~@as(u64, 0))
+ try cg.addImm64(~@as(u64, 0))
else
- return func.fail("TODO: `@atomicRmw` with operator `Nand` for types larger than 64 bits", .{});
- _ = try func.binOp(and_res, .stack, ty, .xor);
- try func.store(.stack, .stack, ty, ptr.offset());
+ return cg.fail("TODO: `@atomicRmw` with operator `Nand` for types larger than 64 bits", .{});
+ _ = try cg.binOp(and_res, .stack, ty, .xor);
+ try cg.store(.stack, .stack, ty, ptr.offset());
},
}
- return func.finishAir(inst, result, &.{ pl_op.operand, extra.operand });
+ return cg.finishAir(inst, result, &.{ pl_op.operand, extra.operand });
}
}
-fn airAtomicStore(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const pt = func.pt;
- const zcu = pt.zcu;
- const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+fn airAtomicStore(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const zcu = cg.pt.zcu;
+ const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
- const ptr = try func.resolveInst(bin_op.lhs);
- const operand = try func.resolveInst(bin_op.rhs);
- const ptr_ty = func.typeOf(bin_op.lhs);
+ const ptr = try cg.resolveInst(bin_op.lhs);
+ const operand = try cg.resolveInst(bin_op.rhs);
+ const ptr_ty = cg.typeOf(bin_op.lhs);
const ty = ptr_ty.childType(zcu);
- if (func.useAtomicFeature()) {
- const tag: wasm.AtomicsOpcode = switch (ty.abiSize(zcu)) {
+ if (cg.useAtomicFeature()) {
+ const tag: std.wasm.AtomicsOpcode = switch (ty.abiSize(zcu)) {
1 => .i32_atomic_store8,
2 => .i32_atomic_store16,
4 => .i32_atomic_store,
8 => .i64_atomic_store,
- else => |size| return func.fail("TODO: @atomicLoad for types with abi size {d}", .{size}),
+ else => |size| return cg.fail("TODO: @atomicLoad for types with abi size {d}", .{size}),
};
- try func.emitWValue(ptr);
- try func.lowerToStack(operand);
- try func.addAtomicMemArg(tag, .{
+ try cg.emitWValue(ptr);
+ try cg.lowerToStack(operand);
+ try cg.addAtomicMemArg(tag, .{
.offset = ptr.offset(),
.alignment = @intCast(ty.abiAlignment(zcu).toByteUnits().?),
});
} else {
- try func.store(ptr, operand, ty, 0);
+ try cg.store(ptr, operand, ty, 0);
}
- return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
+ return cg.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
}
-fn airFrameAddress(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- if (func.initial_stack_value == .none) {
- try func.initializeStack();
+fn airFrameAddress(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ if (cg.initial_stack_value == .none) {
+ try cg.initializeStack();
}
- try func.emitWValue(func.bottom_stack_value);
- return func.finishAir(inst, .stack, &.{});
+ try cg.emitWValue(cg.bottom_stack_value);
+ return cg.finishAir(inst, .stack, &.{});
}
-fn typeOf(func: *CodeGen, inst: Air.Inst.Ref) Type {
- const pt = func.pt;
- const zcu = pt.zcu;
- return func.air.typeOf(inst, &zcu.intern_pool);
+fn typeOf(cg: *CodeGen, inst: Air.Inst.Ref) Type {
+ const zcu = cg.pt.zcu;
+ return cg.air.typeOf(inst, &zcu.intern_pool);
}
-fn typeOfIndex(func: *CodeGen, inst: Air.Inst.Index) Type {
- const pt = func.pt;
- const zcu = pt.zcu;
- return func.air.typeOfIndex(inst, &zcu.intern_pool);
+fn typeOfIndex(cg: *CodeGen, inst: Air.Inst.Index) Type {
+ const zcu = cg.pt.zcu;
+ return cg.air.typeOfIndex(inst, &zcu.intern_pool);
+}
+
+fn floatCmpIntrinsic(op: std.math.CompareOperator, bits: u16) Mir.Intrinsic {
+ return switch (op) {
+ .lt => switch (bits) {
+ 80 => .__ltxf2,
+ 128 => .__lttf2,
+ else => unreachable,
+ },
+ .lte => switch (bits) {
+ 80 => .__lexf2,
+ 128 => .__letf2,
+ else => unreachable,
+ },
+ .eq => switch (bits) {
+ 80 => .__eqxf2,
+ 128 => .__eqtf2,
+ else => unreachable,
+ },
+ .neq => switch (bits) {
+ 80 => .__nexf2,
+ 128 => .__netf2,
+ else => unreachable,
+ },
+ .gte => switch (bits) {
+ 80 => .__gexf2,
+ 128 => .__getf2,
+ else => unreachable,
+ },
+ .gt => switch (bits) {
+ 80 => .__gtxf2,
+ 128 => .__gttf2,
+ else => unreachable,
+ },
+ };
+}
+
+fn extraLen(cg: *const CodeGen) u32 {
+ return @intCast(cg.mir_extra.items.len - cg.start_mir_extra_off);
}
diff --git a/src/arch/wasm/Emit.zig b/src/arch/wasm/Emit.zig
index cd744cd53e..28159f3336 100644
--- a/src/arch/wasm/Emit.zig
+++ b/src/arch/wasm/Emit.zig
@@ -1,673 +1,973 @@
-//! Contains all logic to lower wasm MIR into its binary
-//! or textual representation.
-
const Emit = @This();
+
const std = @import("std");
+const assert = std.debug.assert;
+const Allocator = std.mem.Allocator;
+const leb = std.leb;
+
+const Wasm = link.File.Wasm;
const Mir = @import("Mir.zig");
const link = @import("../../link.zig");
const Zcu = @import("../../Zcu.zig");
const InternPool = @import("../../InternPool.zig");
const codegen = @import("../../codegen.zig");
-const leb128 = std.leb;
-/// Contains our list of instructions
mir: Mir,
-/// Reference to the Wasm module linker
-bin_file: *link.File.Wasm,
-/// Possible error message. When set, the value is allocated and
-/// must be freed manually.
-error_msg: ?*Zcu.ErrorMsg = null,
-/// The binary representation that will be emit by this module.
-code: *std.ArrayList(u8),
-/// List of allocated locals.
-locals: []const u8,
-/// The declaration that code is being generated for.
-owner_nav: InternPool.Nav.Index,
-
-// Debug information
-/// Holds the debug information for this emission
-dbg_output: link.File.DebugInfoOutput,
-/// Previous debug info line
-prev_di_line: u32,
-/// Previous debug info column
-prev_di_column: u32,
-/// Previous offset relative to code section
-prev_di_offset: u32,
-
-const InnerError = error{
+wasm: *Wasm,
+/// The binary representation that will be emitted by this module.
+code: *std.ArrayListUnmanaged(u8),
+
+pub const Error = error{
OutOfMemory,
- EmitFail,
};
-pub fn emitMir(emit: *Emit) InnerError!void {
- const mir_tags = emit.mir.instructions.items(.tag);
- // write the locals in the prologue of the function body
- // before we emit the function body when lowering MIR
- try emit.emitLocals();
-
- for (mir_tags, 0..) |tag, index| {
- const inst = @as(u32, @intCast(index));
- switch (tag) {
- // block instructions
- .block => try emit.emitBlock(tag, inst),
- .loop => try emit.emitBlock(tag, inst),
-
- .dbg_line => try emit.emitDbgLine(inst),
- .dbg_epilogue_begin => try emit.emitDbgEpilogueBegin(),
- .dbg_prologue_end => try emit.emitDbgPrologueEnd(),
-
- // branch instructions
- .br_if => try emit.emitLabel(tag, inst),
- .br_table => try emit.emitBrTable(inst),
- .br => try emit.emitLabel(tag, inst),
-
- // relocatables
- .call => try emit.emitCall(inst),
- .call_indirect => try emit.emitCallIndirect(inst),
- .global_get => try emit.emitGlobal(tag, inst),
- .global_set => try emit.emitGlobal(tag, inst),
- .function_index => try emit.emitFunctionIndex(inst),
- .memory_address => try emit.emitMemAddress(inst),
-
- // immediates
- .f32_const => try emit.emitFloat32(inst),
- .f64_const => try emit.emitFloat64(inst),
- .i32_const => try emit.emitImm32(inst),
- .i64_const => try emit.emitImm64(inst),
-
- // memory instructions
- .i32_load => try emit.emitMemArg(tag, inst),
- .i64_load => try emit.emitMemArg(tag, inst),
- .f32_load => try emit.emitMemArg(tag, inst),
- .f64_load => try emit.emitMemArg(tag, inst),
- .i32_load8_s => try emit.emitMemArg(tag, inst),
- .i32_load8_u => try emit.emitMemArg(tag, inst),
- .i32_load16_s => try emit.emitMemArg(tag, inst),
- .i32_load16_u => try emit.emitMemArg(tag, inst),
- .i64_load8_s => try emit.emitMemArg(tag, inst),
- .i64_load8_u => try emit.emitMemArg(tag, inst),
- .i64_load16_s => try emit.emitMemArg(tag, inst),
- .i64_load16_u => try emit.emitMemArg(tag, inst),
- .i64_load32_s => try emit.emitMemArg(tag, inst),
- .i64_load32_u => try emit.emitMemArg(tag, inst),
- .i32_store => try emit.emitMemArg(tag, inst),
- .i64_store => try emit.emitMemArg(tag, inst),
- .f32_store => try emit.emitMemArg(tag, inst),
- .f64_store => try emit.emitMemArg(tag, inst),
- .i32_store8 => try emit.emitMemArg(tag, inst),
- .i32_store16 => try emit.emitMemArg(tag, inst),
- .i64_store8 => try emit.emitMemArg(tag, inst),
- .i64_store16 => try emit.emitMemArg(tag, inst),
- .i64_store32 => try emit.emitMemArg(tag, inst),
-
- // Instructions with an index that do not require relocations
- .local_get => try emit.emitLabel(tag, inst),
- .local_set => try emit.emitLabel(tag, inst),
- .local_tee => try emit.emitLabel(tag, inst),
- .memory_grow => try emit.emitLabel(tag, inst),
- .memory_size => try emit.emitLabel(tag, inst),
-
- // no-ops
- .end => try emit.emitTag(tag),
- .@"return" => try emit.emitTag(tag),
- .@"unreachable" => try emit.emitTag(tag),
-
- .select => try emit.emitTag(tag),
-
- // arithmetic
- .i32_eqz => try emit.emitTag(tag),
- .i32_eq => try emit.emitTag(tag),
- .i32_ne => try emit.emitTag(tag),
- .i32_lt_s => try emit.emitTag(tag),
- .i32_lt_u => try emit.emitTag(tag),
- .i32_gt_s => try emit.emitTag(tag),
- .i32_gt_u => try emit.emitTag(tag),
- .i32_le_s => try emit.emitTag(tag),
- .i32_le_u => try emit.emitTag(tag),
- .i32_ge_s => try emit.emitTag(tag),
- .i32_ge_u => try emit.emitTag(tag),
- .i64_eqz => try emit.emitTag(tag),
- .i64_eq => try emit.emitTag(tag),
- .i64_ne => try emit.emitTag(tag),
- .i64_lt_s => try emit.emitTag(tag),
- .i64_lt_u => try emit.emitTag(tag),
- .i64_gt_s => try emit.emitTag(tag),
- .i64_gt_u => try emit.emitTag(tag),
- .i64_le_s => try emit.emitTag(tag),
- .i64_le_u => try emit.emitTag(tag),
- .i64_ge_s => try emit.emitTag(tag),
- .i64_ge_u => try emit.emitTag(tag),
- .f32_eq => try emit.emitTag(tag),
- .f32_ne => try emit.emitTag(tag),
- .f32_lt => try emit.emitTag(tag),
- .f32_gt => try emit.emitTag(tag),
- .f32_le => try emit.emitTag(tag),
- .f32_ge => try emit.emitTag(tag),
- .f64_eq => try emit.emitTag(tag),
- .f64_ne => try emit.emitTag(tag),
- .f64_lt => try emit.emitTag(tag),
- .f64_gt => try emit.emitTag(tag),
- .f64_le => try emit.emitTag(tag),
- .f64_ge => try emit.emitTag(tag),
- .i32_add => try emit.emitTag(tag),
- .i32_sub => try emit.emitTag(tag),
- .i32_mul => try emit.emitTag(tag),
- .i32_div_s => try emit.emitTag(tag),
- .i32_div_u => try emit.emitTag(tag),
- .i32_and => try emit.emitTag(tag),
- .i32_or => try emit.emitTag(tag),
- .i32_xor => try emit.emitTag(tag),
- .i32_shl => try emit.emitTag(tag),
- .i32_shr_s => try emit.emitTag(tag),
- .i32_shr_u => try emit.emitTag(tag),
- .i64_add => try emit.emitTag(tag),
- .i64_sub => try emit.emitTag(tag),
- .i64_mul => try emit.emitTag(tag),
- .i64_div_s => try emit.emitTag(tag),
- .i64_div_u => try emit.emitTag(tag),
- .i64_and => try emit.emitTag(tag),
- .i64_or => try emit.emitTag(tag),
- .i64_xor => try emit.emitTag(tag),
- .i64_shl => try emit.emitTag(tag),
- .i64_shr_s => try emit.emitTag(tag),
- .i64_shr_u => try emit.emitTag(tag),
- .f32_abs => try emit.emitTag(tag),
- .f32_neg => try emit.emitTag(tag),
- .f32_ceil => try emit.emitTag(tag),
- .f32_floor => try emit.emitTag(tag),
- .f32_trunc => try emit.emitTag(tag),
- .f32_nearest => try emit.emitTag(tag),
- .f32_sqrt => try emit.emitTag(tag),
- .f32_add => try emit.emitTag(tag),
- .f32_sub => try emit.emitTag(tag),
- .f32_mul => try emit.emitTag(tag),
- .f32_div => try emit.emitTag(tag),
- .f32_min => try emit.emitTag(tag),
- .f32_max => try emit.emitTag(tag),
- .f32_copysign => try emit.emitTag(tag),
- .f64_abs => try emit.emitTag(tag),
- .f64_neg => try emit.emitTag(tag),
- .f64_ceil => try emit.emitTag(tag),
- .f64_floor => try emit.emitTag(tag),
- .f64_trunc => try emit.emitTag(tag),
- .f64_nearest => try emit.emitTag(tag),
- .f64_sqrt => try emit.emitTag(tag),
- .f64_add => try emit.emitTag(tag),
- .f64_sub => try emit.emitTag(tag),
- .f64_mul => try emit.emitTag(tag),
- .f64_div => try emit.emitTag(tag),
- .f64_min => try emit.emitTag(tag),
- .f64_max => try emit.emitTag(tag),
- .f64_copysign => try emit.emitTag(tag),
- .i32_wrap_i64 => try emit.emitTag(tag),
- .i64_extend_i32_s => try emit.emitTag(tag),
- .i64_extend_i32_u => try emit.emitTag(tag),
- .i32_extend8_s => try emit.emitTag(tag),
- .i32_extend16_s => try emit.emitTag(tag),
- .i64_extend8_s => try emit.emitTag(tag),
- .i64_extend16_s => try emit.emitTag(tag),
- .i64_extend32_s => try emit.emitTag(tag),
- .f32_demote_f64 => try emit.emitTag(tag),
- .f64_promote_f32 => try emit.emitTag(tag),
- .i32_reinterpret_f32 => try emit.emitTag(tag),
- .i64_reinterpret_f64 => try emit.emitTag(tag),
- .f32_reinterpret_i32 => try emit.emitTag(tag),
- .f64_reinterpret_i64 => try emit.emitTag(tag),
- .i32_trunc_f32_s => try emit.emitTag(tag),
- .i32_trunc_f32_u => try emit.emitTag(tag),
- .i32_trunc_f64_s => try emit.emitTag(tag),
- .i32_trunc_f64_u => try emit.emitTag(tag),
- .i64_trunc_f32_s => try emit.emitTag(tag),
- .i64_trunc_f32_u => try emit.emitTag(tag),
- .i64_trunc_f64_s => try emit.emitTag(tag),
- .i64_trunc_f64_u => try emit.emitTag(tag),
- .f32_convert_i32_s => try emit.emitTag(tag),
- .f32_convert_i32_u => try emit.emitTag(tag),
- .f32_convert_i64_s => try emit.emitTag(tag),
- .f32_convert_i64_u => try emit.emitTag(tag),
- .f64_convert_i32_s => try emit.emitTag(tag),
- .f64_convert_i32_u => try emit.emitTag(tag),
- .f64_convert_i64_s => try emit.emitTag(tag),
- .f64_convert_i64_u => try emit.emitTag(tag),
- .i32_rem_s => try emit.emitTag(tag),
- .i32_rem_u => try emit.emitTag(tag),
- .i64_rem_s => try emit.emitTag(tag),
- .i64_rem_u => try emit.emitTag(tag),
- .i32_popcnt => try emit.emitTag(tag),
- .i64_popcnt => try emit.emitTag(tag),
- .i32_clz => try emit.emitTag(tag),
- .i32_ctz => try emit.emitTag(tag),
- .i64_clz => try emit.emitTag(tag),
- .i64_ctz => try emit.emitTag(tag),
-
- .misc_prefix => try emit.emitExtended(inst),
- .simd_prefix => try emit.emitSimd(inst),
- .atomics_prefix => try emit.emitAtomic(inst),
- }
- }
-}
-
-fn offset(self: Emit) u32 {
- return @as(u32, @intCast(self.code.items.len));
-}
-
-fn fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError {
- @branchHint(.cold);
- std.debug.assert(emit.error_msg == null);
- const comp = emit.bin_file.base.comp;
- const zcu = comp.zcu.?;
+pub fn lowerToCode(emit: *Emit) Error!void {
+ const mir = &emit.mir;
+ const code = emit.code;
+ const wasm = emit.wasm;
+ const comp = wasm.base.comp;
const gpa = comp.gpa;
- emit.error_msg = try Zcu.ErrorMsg.create(gpa, zcu.navSrcLoc(emit.owner_nav), format, args);
- return error.EmitFail;
-}
-
-fn emitLocals(emit: *Emit) !void {
- const writer = emit.code.writer();
- try leb128.writeUleb128(writer, @as(u32, @intCast(emit.locals.len)));
- // emit the actual locals amount
- for (emit.locals) |local| {
- try leb128.writeUleb128(writer, @as(u32, 1));
- try writer.writeByte(local);
- }
-}
-
-fn emitTag(emit: *Emit, tag: Mir.Inst.Tag) !void {
- try emit.code.append(@intFromEnum(tag));
-}
-
-fn emitBlock(emit: *Emit, tag: Mir.Inst.Tag, inst: Mir.Inst.Index) !void {
- const block_type = emit.mir.instructions.items(.data)[inst].block_type;
- try emit.code.append(@intFromEnum(tag));
- try emit.code.append(block_type);
-}
-
-fn emitBrTable(emit: *Emit, inst: Mir.Inst.Index) !void {
- const extra_index = emit.mir.instructions.items(.data)[inst].payload;
- const extra = emit.mir.extraData(Mir.JumpTable, extra_index);
- const labels = emit.mir.extra[extra.end..][0..extra.data.length];
- const writer = emit.code.writer();
+ const is_obj = comp.config.output_mode == .Obj;
+ const target = &comp.root_mod.resolved_target.result;
+ const is_wasm32 = target.cpu.arch == .wasm32;
- try emit.code.append(std.wasm.opcode(.br_table));
- try leb128.writeUleb128(writer, extra.data.length - 1); // Default label is not part of length/depth
- for (labels) |label| {
- try leb128.writeUleb128(writer, label);
- }
-}
+ const tags = mir.instruction_tags;
+ const datas = mir.instruction_datas;
+ var inst: u32 = 0;
-fn emitLabel(emit: *Emit, tag: Mir.Inst.Tag, inst: Mir.Inst.Index) !void {
- const label = emit.mir.instructions.items(.data)[inst].label;
- try emit.code.append(@intFromEnum(tag));
- try leb128.writeUleb128(emit.code.writer(), label);
-}
+ loop: switch (tags[inst]) {
+ .dbg_epilogue_begin => {
+ return;
+ },
+ .block, .loop => {
+ const block_type = datas[inst].block_type;
+ try code.ensureUnusedCapacity(gpa, 2);
+ code.appendAssumeCapacity(@intFromEnum(tags[inst]));
+ code.appendAssumeCapacity(@intFromEnum(block_type));
+
+ inst += 1;
+ continue :loop tags[inst];
+ },
+ .uav_ref => {
+ if (is_obj) {
+ try uavRefOffObj(wasm, code, .{ .uav_obj = datas[inst].uav_obj, .offset = 0 }, is_wasm32);
+ } else {
+ try uavRefOffExe(wasm, code, .{ .uav_exe = datas[inst].uav_exe, .offset = 0 }, is_wasm32);
+ }
+ inst += 1;
+ continue :loop tags[inst];
+ },
+ .uav_ref_off => {
+ if (is_obj) {
+ try uavRefOffObj(wasm, code, mir.extraData(Mir.UavRefOffObj, datas[inst].payload).data, is_wasm32);
+ } else {
+ try uavRefOffExe(wasm, code, mir.extraData(Mir.UavRefOffExe, datas[inst].payload).data, is_wasm32);
+ }
+ inst += 1;
+ continue :loop tags[inst];
+ },
+ .nav_ref => {
+ try navRefOff(wasm, code, .{ .nav_index = datas[inst].nav_index, .offset = 0 }, is_wasm32);
+ inst += 1;
+ continue :loop tags[inst];
+ },
+ .nav_ref_off => {
+ try navRefOff(wasm, code, mir.extraData(Mir.NavRefOff, datas[inst].payload).data, is_wasm32);
+ inst += 1;
+ continue :loop tags[inst];
+ },
+ .func_ref => {
+ code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_const));
+ if (is_obj) {
+ @panic("TODO");
+ } else {
+ leb.writeUleb128(code.fixedWriter(), 1 + @intFromEnum(datas[inst].indirect_function_table_index)) catch unreachable;
+ }
+ inst += 1;
+ continue :loop tags[inst];
+ },
+ .dbg_line => {
+ inst += 1;
+ continue :loop tags[inst];
+ },
+ .errors_len => {
+ try code.ensureUnusedCapacity(gpa, 6);
+ code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_const));
+ // MIR is lowered during flush, so there is indeed only one thread at this time.
+ const errors_len = 1 + comp.zcu.?.intern_pool.global_error_set.getNamesFromMainThread().len;
+ leb.writeIleb128(code.fixedWriter(), errors_len) catch unreachable;
+
+ inst += 1;
+ continue :loop tags[inst];
+ },
+ .error_name_table_ref => {
+ try code.ensureUnusedCapacity(gpa, 11);
+ const opcode: std.wasm.Opcode = if (is_wasm32) .i32_const else .i64_const;
+ code.appendAssumeCapacity(@intFromEnum(opcode));
+ if (is_obj) {
+ try wasm.out_relocs.append(gpa, .{
+ .offset = @intCast(code.items.len),
+ .pointee = .{ .symbol_index = try wasm.errorNameTableSymbolIndex() },
+ .tag = if (is_wasm32) .memory_addr_leb else .memory_addr_leb64,
+ .addend = 0,
+ });
+ code.appendNTimesAssumeCapacity(0, if (is_wasm32) 5 else 10);
+
+ inst += 1;
+ continue :loop tags[inst];
+ } else {
+ const addr: u32 = wasm.errorNameTableAddr();
+ leb.writeIleb128(code.fixedWriter(), addr) catch unreachable;
+
+ inst += 1;
+ continue :loop tags[inst];
+ }
+ },
+ .br_if, .br, .memory_grow, .memory_size => {
+ try code.ensureUnusedCapacity(gpa, 11);
+ code.appendAssumeCapacity(@intFromEnum(tags[inst]));
+ leb.writeUleb128(code.fixedWriter(), datas[inst].label) catch unreachable;
-fn emitGlobal(emit: *Emit, tag: Mir.Inst.Tag, inst: Mir.Inst.Index) !void {
- const comp = emit.bin_file.base.comp;
- const gpa = comp.gpa;
- const label = emit.mir.instructions.items(.data)[inst].label;
- try emit.code.append(@intFromEnum(tag));
- var buf: [5]u8 = undefined;
- leb128.writeUnsignedFixed(5, &buf, label);
- const global_offset = emit.offset();
- try emit.code.appendSlice(&buf);
-
- const atom_index = emit.bin_file.zig_object.?.navs.get(emit.owner_nav).?.atom;
- const atom = emit.bin_file.getAtomPtr(atom_index);
- try atom.relocs.append(gpa, .{
- .index = label,
- .offset = global_offset,
- .relocation_type = .R_WASM_GLOBAL_INDEX_LEB,
- });
-}
+ inst += 1;
+ continue :loop tags[inst];
+ },
-fn emitImm32(emit: *Emit, inst: Mir.Inst.Index) !void {
- const value: i32 = emit.mir.instructions.items(.data)[inst].imm32;
- try emit.code.append(std.wasm.opcode(.i32_const));
- try leb128.writeIleb128(emit.code.writer(), value);
-}
+ .local_get, .local_set, .local_tee => {
+ try code.ensureUnusedCapacity(gpa, 11);
+ code.appendAssumeCapacity(@intFromEnum(tags[inst]));
+ leb.writeUleb128(code.fixedWriter(), datas[inst].local) catch unreachable;
-fn emitImm64(emit: *Emit, inst: Mir.Inst.Index) !void {
- const extra_index = emit.mir.instructions.items(.data)[inst].payload;
- const value = emit.mir.extraData(Mir.Imm64, extra_index);
- try emit.code.append(std.wasm.opcode(.i64_const));
- try leb128.writeIleb128(emit.code.writer(), @as(i64, @bitCast(value.data.toU64())));
-}
+ inst += 1;
+ continue :loop tags[inst];
+ },
-fn emitFloat32(emit: *Emit, inst: Mir.Inst.Index) !void {
- const value: f32 = emit.mir.instructions.items(.data)[inst].float32;
- try emit.code.append(std.wasm.opcode(.f32_const));
- try emit.code.writer().writeInt(u32, @bitCast(value), .little);
-}
+ .br_table => {
+ const extra_index = datas[inst].payload;
+ const extra = mir.extraData(Mir.JumpTable, extra_index);
+ const labels = mir.extra[extra.end..][0..extra.data.length];
+ try code.ensureUnusedCapacity(gpa, 11 + 10 * labels.len);
+ code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.br_table));
+ // -1 because default label is not part of length/depth.
+ leb.writeUleb128(code.fixedWriter(), extra.data.length - 1) catch unreachable;
+ for (labels) |label| leb.writeUleb128(code.fixedWriter(), label) catch unreachable;
+
+ inst += 1;
+ continue :loop tags[inst];
+ },
-fn emitFloat64(emit: *Emit, inst: Mir.Inst.Index) !void {
- const extra_index = emit.mir.instructions.items(.data)[inst].payload;
- const value = emit.mir.extraData(Mir.Float64, extra_index);
- try emit.code.append(std.wasm.opcode(.f64_const));
- try emit.code.writer().writeInt(u64, value.data.toU64(), .little);
-}
+ .call_nav => {
+ try code.ensureUnusedCapacity(gpa, 6);
+ code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.call));
+ if (is_obj) {
+ try wasm.out_relocs.append(gpa, .{
+ .offset = @intCast(code.items.len),
+ .pointee = .{ .symbol_index = try wasm.navSymbolIndex(datas[inst].nav_index) },
+ .tag = .function_index_leb,
+ .addend = 0,
+ });
+ code.appendNTimesAssumeCapacity(0, 5);
+ } else {
+ appendOutputFunctionIndex(code, .fromIpNav(wasm, datas[inst].nav_index));
+ }
+
+ inst += 1;
+ continue :loop tags[inst];
+ },
-fn emitMemArg(emit: *Emit, tag: Mir.Inst.Tag, inst: Mir.Inst.Index) !void {
- const extra_index = emit.mir.instructions.items(.data)[inst].payload;
- const mem_arg = emit.mir.extraData(Mir.MemArg, extra_index).data;
- try emit.code.append(@intFromEnum(tag));
- try encodeMemArg(mem_arg, emit.code.writer());
-}
+ .call_indirect => {
+ try code.ensureUnusedCapacity(gpa, 11);
+ const func_ty_index = datas[inst].func_ty;
+ code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.call_indirect));
+ if (is_obj) {
+ try wasm.out_relocs.append(gpa, .{
+ .offset = @intCast(code.items.len),
+ .pointee = .{ .type_index = func_ty_index },
+ .tag = .type_index_leb,
+ .addend = 0,
+ });
+ code.appendNTimesAssumeCapacity(0, 5);
+ } else {
+ const index: Wasm.Flush.FuncTypeIndex = .fromTypeIndex(func_ty_index, &wasm.flush_buffer);
+ leb.writeUleb128(code.fixedWriter(), @intFromEnum(index)) catch unreachable;
+ }
+ leb.writeUleb128(code.fixedWriter(), @as(u32, 0)) catch unreachable; // table index
+
+ inst += 1;
+ continue :loop tags[inst];
+ },
-fn encodeMemArg(mem_arg: Mir.MemArg, writer: anytype) !void {
- // wasm encodes alignment as power of 2, rather than natural alignment
- const encoded_alignment = @ctz(mem_arg.alignment);
- try leb128.writeUleb128(writer, encoded_alignment);
- try leb128.writeUleb128(writer, mem_arg.offset);
-}
+ .call_tag_name => {
+ try code.ensureUnusedCapacity(gpa, 6);
+ code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.call));
+ if (is_obj) {
+ try wasm.out_relocs.append(gpa, .{
+ .offset = @intCast(code.items.len),
+ .pointee = .{ .symbol_index = try wasm.tagNameSymbolIndex(datas[inst].ip_index) },
+ .tag = .function_index_leb,
+ .addend = 0,
+ });
+ code.appendNTimesAssumeCapacity(0, 5);
+ } else {
+ appendOutputFunctionIndex(code, .fromTagNameType(wasm, datas[inst].ip_index));
+ }
+
+ inst += 1;
+ continue :loop tags[inst];
+ },
-fn emitCall(emit: *Emit, inst: Mir.Inst.Index) !void {
- const comp = emit.bin_file.base.comp;
- const gpa = comp.gpa;
- const label = emit.mir.instructions.items(.data)[inst].label;
- try emit.code.append(std.wasm.opcode(.call));
- const call_offset = emit.offset();
- var buf: [5]u8 = undefined;
- leb128.writeUnsignedFixed(5, &buf, label);
- try emit.code.appendSlice(&buf);
-
- if (label != 0) {
- const atom_index = emit.bin_file.zig_object.?.navs.get(emit.owner_nav).?.atom;
- const atom = emit.bin_file.getAtomPtr(atom_index);
- try atom.relocs.append(gpa, .{
- .offset = call_offset,
- .index = label,
- .relocation_type = .R_WASM_FUNCTION_INDEX_LEB,
- });
- }
-}
+ .call_intrinsic => {
+ // Although this currently uses `wasm.internString`, note that it
+ // *could* be changed to directly index into a preloaded strings
+ // table initialized based on the `Mir.Intrinsic` enum.
+ const symbol_name = try wasm.internString(@tagName(datas[inst].intrinsic));
+
+ try code.ensureUnusedCapacity(gpa, 6);
+ code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.call));
+ if (is_obj) {
+ try wasm.out_relocs.append(gpa, .{
+ .offset = @intCast(code.items.len),
+ .pointee = .{ .symbol_index = try wasm.symbolNameIndex(symbol_name) },
+ .tag = .function_index_leb,
+ .addend = 0,
+ });
+ code.appendNTimesAssumeCapacity(0, 5);
+ } else {
+ appendOutputFunctionIndex(code, .fromSymbolName(wasm, symbol_name));
+ }
+
+ inst += 1;
+ continue :loop tags[inst];
+ },
-fn emitCallIndirect(emit: *Emit, inst: Mir.Inst.Index) !void {
- const type_index = emit.mir.instructions.items(.data)[inst].label;
- try emit.code.append(std.wasm.opcode(.call_indirect));
- // NOTE: If we remove unused function types in the future for incremental
- // linking, we must also emit a relocation for this `type_index`
- const call_offset = emit.offset();
- var buf: [5]u8 = undefined;
- leb128.writeUnsignedFixed(5, &buf, type_index);
- try emit.code.appendSlice(&buf);
- if (type_index != 0) {
- const atom_index = emit.bin_file.zig_object.?.navs.get(emit.owner_nav).?.atom;
- const atom = emit.bin_file.getAtomPtr(atom_index);
- try atom.relocs.append(emit.bin_file.base.comp.gpa, .{
- .offset = call_offset,
- .index = type_index,
- .relocation_type = .R_WASM_TYPE_INDEX_LEB,
- });
- }
- try leb128.writeUleb128(emit.code.writer(), @as(u32, 0)); // TODO: Emit relocation for table index
-}
+ .global_set_sp => {
+ try code.ensureUnusedCapacity(gpa, 6);
+ code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.global_set));
+ if (is_obj) {
+ try wasm.out_relocs.append(gpa, .{
+ .offset = @intCast(code.items.len),
+ .pointee = .{ .symbol_index = try wasm.stackPointerSymbolIndex() },
+ .tag = .global_index_leb,
+ .addend = 0,
+ });
+ code.appendNTimesAssumeCapacity(0, 5);
+ } else {
+ const sp_global: Wasm.GlobalIndex = .stack_pointer;
+ std.leb.writeULEB128(code.fixedWriter(), @intFromEnum(sp_global)) catch unreachable;
+ }
+
+ inst += 1;
+ continue :loop tags[inst];
+ },
-fn emitFunctionIndex(emit: *Emit, inst: Mir.Inst.Index) !void {
- const comp = emit.bin_file.base.comp;
- const gpa = comp.gpa;
- const symbol_index = emit.mir.instructions.items(.data)[inst].label;
- try emit.code.append(std.wasm.opcode(.i32_const));
- const index_offset = emit.offset();
- var buf: [5]u8 = undefined;
- leb128.writeUnsignedFixed(5, &buf, symbol_index);
- try emit.code.appendSlice(&buf);
-
- if (symbol_index != 0) {
- const atom_index = emit.bin_file.zig_object.?.navs.get(emit.owner_nav).?.atom;
- const atom = emit.bin_file.getAtomPtr(atom_index);
- try atom.relocs.append(gpa, .{
- .offset = index_offset,
- .index = symbol_index,
- .relocation_type = .R_WASM_TABLE_INDEX_SLEB,
- });
- }
-}
+ .f32_const => {
+ try code.ensureUnusedCapacity(gpa, 5);
+ code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.f32_const));
+ std.mem.writeInt(u32, code.addManyAsArrayAssumeCapacity(4), @bitCast(datas[inst].float32), .little);
-fn emitMemAddress(emit: *Emit, inst: Mir.Inst.Index) !void {
- const extra_index = emit.mir.instructions.items(.data)[inst].payload;
- const mem = emit.mir.extraData(Mir.Memory, extra_index).data;
- const mem_offset = emit.offset() + 1;
- const comp = emit.bin_file.base.comp;
- const gpa = comp.gpa;
- const target = comp.root_mod.resolved_target.result;
- const is_wasm32 = target.cpu.arch == .wasm32;
- if (is_wasm32) {
- try emit.code.append(std.wasm.opcode(.i32_const));
- var buf: [5]u8 = undefined;
- leb128.writeUnsignedFixed(5, &buf, mem.pointer);
- try emit.code.appendSlice(&buf);
- } else {
- try emit.code.append(std.wasm.opcode(.i64_const));
- var buf: [10]u8 = undefined;
- leb128.writeUnsignedFixed(10, &buf, mem.pointer);
- try emit.code.appendSlice(&buf);
- }
+ inst += 1;
+ continue :loop tags[inst];
+ },
- if (mem.pointer != 0) {
- const atom_index = emit.bin_file.zig_object.?.navs.get(emit.owner_nav).?.atom;
- const atom = emit.bin_file.getAtomPtr(atom_index);
- try atom.relocs.append(gpa, .{
- .offset = mem_offset,
- .index = mem.pointer,
- .relocation_type = if (is_wasm32) .R_WASM_MEMORY_ADDR_LEB else .R_WASM_MEMORY_ADDR_LEB64,
- .addend = @as(i32, @intCast(mem.offset)),
- });
- }
-}
+ .f64_const => {
+ try code.ensureUnusedCapacity(gpa, 9);
+ code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.f64_const));
+ const float64 = mir.extraData(Mir.Float64, datas[inst].payload).data;
+ std.mem.writeInt(u64, code.addManyAsArrayAssumeCapacity(8), float64.toInt(), .little);
-fn emitExtended(emit: *Emit, inst: Mir.Inst.Index) !void {
- const extra_index = emit.mir.instructions.items(.data)[inst].payload;
- const opcode = emit.mir.extra[extra_index];
- const writer = emit.code.writer();
- try emit.code.append(std.wasm.opcode(.misc_prefix));
- try leb128.writeUleb128(writer, opcode);
- switch (@as(std.wasm.MiscOpcode, @enumFromInt(opcode))) {
- // bulk-memory opcodes
- .data_drop => {
- const segment = emit.mir.extra[extra_index + 1];
- try leb128.writeUleb128(writer, segment);
- },
- .memory_init => {
- const segment = emit.mir.extra[extra_index + 1];
- try leb128.writeUleb128(writer, segment);
- try leb128.writeUleb128(writer, @as(u32, 0)); // memory index
+ inst += 1;
+ continue :loop tags[inst];
},
- .memory_fill => {
- try leb128.writeUleb128(writer, @as(u32, 0)); // memory index
+ .i32_const => {
+ try code.ensureUnusedCapacity(gpa, 6);
+ code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_const));
+ leb.writeIleb128(code.fixedWriter(), datas[inst].imm32) catch unreachable;
+
+ inst += 1;
+ continue :loop tags[inst];
},
- .memory_copy => {
- try leb128.writeUleb128(writer, @as(u32, 0)); // dst memory index
- try leb128.writeUleb128(writer, @as(u32, 0)); // src memory index
+ .i64_const => {
+ try code.ensureUnusedCapacity(gpa, 11);
+ code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i64_const));
+ const int64: i64 = @bitCast(mir.extraData(Mir.Imm64, datas[inst].payload).data.toInt());
+ leb.writeIleb128(code.fixedWriter(), int64) catch unreachable;
+
+ inst += 1;
+ continue :loop tags[inst];
},
- // nontrapping-float-to-int-conversion opcodes
- .i32_trunc_sat_f32_s,
- .i32_trunc_sat_f32_u,
- .i32_trunc_sat_f64_s,
- .i32_trunc_sat_f64_u,
- .i64_trunc_sat_f32_s,
- .i64_trunc_sat_f32_u,
- .i64_trunc_sat_f64_s,
- .i64_trunc_sat_f64_u,
- => {}, // opcode already written
- else => |tag| return emit.fail("TODO: Implement extension instruction: {s}\n", .{@tagName(tag)}),
- }
-}
-
-fn emitSimd(emit: *Emit, inst: Mir.Inst.Index) !void {
- const extra_index = emit.mir.instructions.items(.data)[inst].payload;
- const opcode = emit.mir.extra[extra_index];
- const writer = emit.code.writer();
- try emit.code.append(std.wasm.opcode(.simd_prefix));
- try leb128.writeUleb128(writer, opcode);
- switch (@as(std.wasm.SimdOpcode, @enumFromInt(opcode))) {
- .v128_store,
- .v128_load,
- .v128_load8_splat,
- .v128_load16_splat,
- .v128_load32_splat,
- .v128_load64_splat,
+ .i32_load,
+ .i64_load,
+ .f32_load,
+ .f64_load,
+ .i32_load8_s,
+ .i32_load8_u,
+ .i32_load16_s,
+ .i32_load16_u,
+ .i64_load8_s,
+ .i64_load8_u,
+ .i64_load16_s,
+ .i64_load16_u,
+ .i64_load32_s,
+ .i64_load32_u,
+ .i32_store,
+ .i64_store,
+ .f32_store,
+ .f64_store,
+ .i32_store8,
+ .i32_store16,
+ .i64_store8,
+ .i64_store16,
+ .i64_store32,
=> {
- const mem_arg = emit.mir.extraData(Mir.MemArg, extra_index + 1).data;
- try encodeMemArg(mem_arg, writer);
+ try code.ensureUnusedCapacity(gpa, 1 + 20);
+ code.appendAssumeCapacity(@intFromEnum(tags[inst]));
+ encodeMemArg(code, mir.extraData(Mir.MemArg, datas[inst].payload).data);
+ inst += 1;
+ continue :loop tags[inst];
},
- .v128_const,
- .i8x16_shuffle,
- => {
- const simd_value = emit.mir.extra[extra_index + 1 ..][0..4];
- try writer.writeAll(std.mem.asBytes(simd_value));
- },
- .i8x16_extract_lane_s,
- .i8x16_extract_lane_u,
- .i8x16_replace_lane,
- .i16x8_extract_lane_s,
- .i16x8_extract_lane_u,
- .i16x8_replace_lane,
- .i32x4_extract_lane,
- .i32x4_replace_lane,
- .i64x2_extract_lane,
- .i64x2_replace_lane,
- .f32x4_extract_lane,
- .f32x4_replace_lane,
- .f64x2_extract_lane,
- .f64x2_replace_lane,
+
+ .end,
+ .@"return",
+ .@"unreachable",
+ .select,
+ .i32_eqz,
+ .i32_eq,
+ .i32_ne,
+ .i32_lt_s,
+ .i32_lt_u,
+ .i32_gt_s,
+ .i32_gt_u,
+ .i32_le_s,
+ .i32_le_u,
+ .i32_ge_s,
+ .i32_ge_u,
+ .i64_eqz,
+ .i64_eq,
+ .i64_ne,
+ .i64_lt_s,
+ .i64_lt_u,
+ .i64_gt_s,
+ .i64_gt_u,
+ .i64_le_s,
+ .i64_le_u,
+ .i64_ge_s,
+ .i64_ge_u,
+ .f32_eq,
+ .f32_ne,
+ .f32_lt,
+ .f32_gt,
+ .f32_le,
+ .f32_ge,
+ .f64_eq,
+ .f64_ne,
+ .f64_lt,
+ .f64_gt,
+ .f64_le,
+ .f64_ge,
+ .i32_add,
+ .i32_sub,
+ .i32_mul,
+ .i32_div_s,
+ .i32_div_u,
+ .i32_and,
+ .i32_or,
+ .i32_xor,
+ .i32_shl,
+ .i32_shr_s,
+ .i32_shr_u,
+ .i64_add,
+ .i64_sub,
+ .i64_mul,
+ .i64_div_s,
+ .i64_div_u,
+ .i64_and,
+ .i64_or,
+ .i64_xor,
+ .i64_shl,
+ .i64_shr_s,
+ .i64_shr_u,
+ .f32_abs,
+ .f32_neg,
+ .f32_ceil,
+ .f32_floor,
+ .f32_trunc,
+ .f32_nearest,
+ .f32_sqrt,
+ .f32_add,
+ .f32_sub,
+ .f32_mul,
+ .f32_div,
+ .f32_min,
+ .f32_max,
+ .f32_copysign,
+ .f64_abs,
+ .f64_neg,
+ .f64_ceil,
+ .f64_floor,
+ .f64_trunc,
+ .f64_nearest,
+ .f64_sqrt,
+ .f64_add,
+ .f64_sub,
+ .f64_mul,
+ .f64_div,
+ .f64_min,
+ .f64_max,
+ .f64_copysign,
+ .i32_wrap_i64,
+ .i64_extend_i32_s,
+ .i64_extend_i32_u,
+ .i32_extend8_s,
+ .i32_extend16_s,
+ .i64_extend8_s,
+ .i64_extend16_s,
+ .i64_extend32_s,
+ .f32_demote_f64,
+ .f64_promote_f32,
+ .i32_reinterpret_f32,
+ .i64_reinterpret_f64,
+ .f32_reinterpret_i32,
+ .f64_reinterpret_i64,
+ .i32_trunc_f32_s,
+ .i32_trunc_f32_u,
+ .i32_trunc_f64_s,
+ .i32_trunc_f64_u,
+ .i64_trunc_f32_s,
+ .i64_trunc_f32_u,
+ .i64_trunc_f64_s,
+ .i64_trunc_f64_u,
+ .f32_convert_i32_s,
+ .f32_convert_i32_u,
+ .f32_convert_i64_s,
+ .f32_convert_i64_u,
+ .f64_convert_i32_s,
+ .f64_convert_i32_u,
+ .f64_convert_i64_s,
+ .f64_convert_i64_u,
+ .i32_rem_s,
+ .i32_rem_u,
+ .i64_rem_s,
+ .i64_rem_u,
+ .i32_popcnt,
+ .i64_popcnt,
+ .i32_clz,
+ .i32_ctz,
+ .i64_clz,
+ .i64_ctz,
=> {
- try writer.writeByte(@as(u8, @intCast(emit.mir.extra[extra_index + 1])));
+ try code.append(gpa, @intFromEnum(tags[inst]));
+ inst += 1;
+ continue :loop tags[inst];
},
- .i8x16_splat,
- .i16x8_splat,
- .i32x4_splat,
- .i64x2_splat,
- .f32x4_splat,
- .f64x2_splat,
- => {}, // opcode already written
- else => |tag| return emit.fail("TODO: Implement simd instruction: {s}", .{@tagName(tag)}),
- }
-}
-fn emitAtomic(emit: *Emit, inst: Mir.Inst.Index) !void {
- const extra_index = emit.mir.instructions.items(.data)[inst].payload;
- const opcode = emit.mir.extra[extra_index];
- const writer = emit.code.writer();
- try emit.code.append(std.wasm.opcode(.atomics_prefix));
- try leb128.writeUleb128(writer, opcode);
- switch (@as(std.wasm.AtomicsOpcode, @enumFromInt(opcode))) {
- .i32_atomic_load,
- .i64_atomic_load,
- .i32_atomic_load8_u,
- .i32_atomic_load16_u,
- .i64_atomic_load8_u,
- .i64_atomic_load16_u,
- .i64_atomic_load32_u,
- .i32_atomic_store,
- .i64_atomic_store,
- .i32_atomic_store8,
- .i32_atomic_store16,
- .i64_atomic_store8,
- .i64_atomic_store16,
- .i64_atomic_store32,
- .i32_atomic_rmw_add,
- .i64_atomic_rmw_add,
- .i32_atomic_rmw8_add_u,
- .i32_atomic_rmw16_add_u,
- .i64_atomic_rmw8_add_u,
- .i64_atomic_rmw16_add_u,
- .i64_atomic_rmw32_add_u,
- .i32_atomic_rmw_sub,
- .i64_atomic_rmw_sub,
- .i32_atomic_rmw8_sub_u,
- .i32_atomic_rmw16_sub_u,
- .i64_atomic_rmw8_sub_u,
- .i64_atomic_rmw16_sub_u,
- .i64_atomic_rmw32_sub_u,
- .i32_atomic_rmw_and,
- .i64_atomic_rmw_and,
- .i32_atomic_rmw8_and_u,
- .i32_atomic_rmw16_and_u,
- .i64_atomic_rmw8_and_u,
- .i64_atomic_rmw16_and_u,
- .i64_atomic_rmw32_and_u,
- .i32_atomic_rmw_or,
- .i64_atomic_rmw_or,
- .i32_atomic_rmw8_or_u,
- .i32_atomic_rmw16_or_u,
- .i64_atomic_rmw8_or_u,
- .i64_atomic_rmw16_or_u,
- .i64_atomic_rmw32_or_u,
- .i32_atomic_rmw_xor,
- .i64_atomic_rmw_xor,
- .i32_atomic_rmw8_xor_u,
- .i32_atomic_rmw16_xor_u,
- .i64_atomic_rmw8_xor_u,
- .i64_atomic_rmw16_xor_u,
- .i64_atomic_rmw32_xor_u,
- .i32_atomic_rmw_xchg,
- .i64_atomic_rmw_xchg,
- .i32_atomic_rmw8_xchg_u,
- .i32_atomic_rmw16_xchg_u,
- .i64_atomic_rmw8_xchg_u,
- .i64_atomic_rmw16_xchg_u,
- .i64_atomic_rmw32_xchg_u,
-
- .i32_atomic_rmw_cmpxchg,
- .i64_atomic_rmw_cmpxchg,
- .i32_atomic_rmw8_cmpxchg_u,
- .i32_atomic_rmw16_cmpxchg_u,
- .i64_atomic_rmw8_cmpxchg_u,
- .i64_atomic_rmw16_cmpxchg_u,
- .i64_atomic_rmw32_cmpxchg_u,
- => {
- const mem_arg = emit.mir.extraData(Mir.MemArg, extra_index + 1).data;
- try encodeMemArg(mem_arg, writer);
+ .misc_prefix => {
+ try code.ensureUnusedCapacity(gpa, 6 + 6);
+ const extra_index = datas[inst].payload;
+ const opcode = mir.extra[extra_index];
+ code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.misc_prefix));
+ leb.writeUleb128(code.fixedWriter(), opcode) catch unreachable;
+ switch (@as(std.wasm.MiscOpcode, @enumFromInt(opcode))) {
+ // bulk-memory opcodes
+ .data_drop => {
+ const segment = mir.extra[extra_index + 1];
+ leb.writeUleb128(code.fixedWriter(), segment) catch unreachable;
+
+ inst += 1;
+ continue :loop tags[inst];
+ },
+ .memory_init => {
+ const segment = mir.extra[extra_index + 1];
+ leb.writeUleb128(code.fixedWriter(), segment) catch unreachable;
+ leb.writeUleb128(code.fixedWriter(), @as(u32, 0)) catch unreachable; // memory index
+
+ inst += 1;
+ continue :loop tags[inst];
+ },
+ .memory_fill => {
+ leb.writeUleb128(code.fixedWriter(), @as(u32, 0)) catch unreachable; // memory index
+
+ inst += 1;
+ continue :loop tags[inst];
+ },
+ .memory_copy => {
+ leb.writeUleb128(code.fixedWriter(), @as(u32, 0)) catch unreachable; // dst memory index
+ leb.writeUleb128(code.fixedWriter(), @as(u32, 0)) catch unreachable; // src memory index
+
+ inst += 1;
+ continue :loop tags[inst];
+ },
+
+ // nontrapping-float-to-int-conversion opcodes
+ .i32_trunc_sat_f32_s,
+ .i32_trunc_sat_f32_u,
+ .i32_trunc_sat_f64_s,
+ .i32_trunc_sat_f64_u,
+ .i64_trunc_sat_f32_s,
+ .i64_trunc_sat_f32_u,
+ .i64_trunc_sat_f64_s,
+ .i64_trunc_sat_f64_u,
+ => {
+ inst += 1;
+ continue :loop tags[inst];
+ },
+
+ .table_init => @panic("TODO"),
+ .elem_drop => @panic("TODO"),
+ .table_copy => @panic("TODO"),
+ .table_grow => @panic("TODO"),
+ .table_size => @panic("TODO"),
+ .table_fill => @panic("TODO"),
+
+ _ => unreachable,
+ }
+ comptime unreachable;
},
- .atomic_fence => {
- // TODO: When multi-memory proposal is accepted and implemented in the compiler,
- // change this to (user-)specified index, rather than hardcode it to memory index 0.
- const memory_index: u32 = 0;
- try leb128.writeUleb128(writer, memory_index);
+ .simd_prefix => {
+ try code.ensureUnusedCapacity(gpa, 6 + 20);
+ const extra_index = datas[inst].payload;
+ const opcode = mir.extra[extra_index];
+ code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.simd_prefix));
+ leb.writeUleb128(code.fixedWriter(), opcode) catch unreachable;
+ switch (@as(std.wasm.SimdOpcode, @enumFromInt(opcode))) {
+ .v128_store,
+ .v128_load,
+ .v128_load8_splat,
+ .v128_load16_splat,
+ .v128_load32_splat,
+ .v128_load64_splat,
+ => {
+ encodeMemArg(code, mir.extraData(Mir.MemArg, extra_index + 1).data);
+ inst += 1;
+ continue :loop tags[inst];
+ },
+ .v128_const, .i8x16_shuffle => {
+ code.appendSliceAssumeCapacity(std.mem.asBytes(mir.extra[extra_index + 1 ..][0..4]));
+ inst += 1;
+ continue :loop tags[inst];
+ },
+ .i8x16_extract_lane_s,
+ .i8x16_extract_lane_u,
+ .i8x16_replace_lane,
+ .i16x8_extract_lane_s,
+ .i16x8_extract_lane_u,
+ .i16x8_replace_lane,
+ .i32x4_extract_lane,
+ .i32x4_replace_lane,
+ .i64x2_extract_lane,
+ .i64x2_replace_lane,
+ .f32x4_extract_lane,
+ .f32x4_replace_lane,
+ .f64x2_extract_lane,
+ .f64x2_replace_lane,
+ => {
+ code.appendAssumeCapacity(@intCast(mir.extra[extra_index + 1]));
+ inst += 1;
+ continue :loop tags[inst];
+ },
+ .i8x16_splat,
+ .i16x8_splat,
+ .i32x4_splat,
+ .i64x2_splat,
+ .f32x4_splat,
+ .f64x2_splat,
+ => {
+ inst += 1;
+ continue :loop tags[inst];
+ },
+
+ .v128_load8x8_s => @panic("TODO"),
+ .v128_load8x8_u => @panic("TODO"),
+ .v128_load16x4_s => @panic("TODO"),
+ .v128_load16x4_u => @panic("TODO"),
+ .v128_load32x2_s => @panic("TODO"),
+ .v128_load32x2_u => @panic("TODO"),
+ .i8x16_swizzle => @panic("TODO"),
+ .i8x16_eq => @panic("TODO"),
+ .i16x8_eq => @panic("TODO"),
+ .i32x4_eq => @panic("TODO"),
+ .i8x16_ne => @panic("TODO"),
+ .i16x8_ne => @panic("TODO"),
+ .i32x4_ne => @panic("TODO"),
+ .i8x16_lt_s => @panic("TODO"),
+ .i16x8_lt_s => @panic("TODO"),
+ .i32x4_lt_s => @panic("TODO"),
+ .i8x16_lt_u => @panic("TODO"),
+ .i16x8_lt_u => @panic("TODO"),
+ .i32x4_lt_u => @panic("TODO"),
+ .i8x16_gt_s => @panic("TODO"),
+ .i16x8_gt_s => @panic("TODO"),
+ .i32x4_gt_s => @panic("TODO"),
+ .i8x16_gt_u => @panic("TODO"),
+ .i16x8_gt_u => @panic("TODO"),
+ .i32x4_gt_u => @panic("TODO"),
+ .i8x16_le_s => @panic("TODO"),
+ .i16x8_le_s => @panic("TODO"),
+ .i32x4_le_s => @panic("TODO"),
+ .i8x16_le_u => @panic("TODO"),
+ .i16x8_le_u => @panic("TODO"),
+ .i32x4_le_u => @panic("TODO"),
+ .i8x16_ge_s => @panic("TODO"),
+ .i16x8_ge_s => @panic("TODO"),
+ .i32x4_ge_s => @panic("TODO"),
+ .i8x16_ge_u => @panic("TODO"),
+ .i16x8_ge_u => @panic("TODO"),
+ .i32x4_ge_u => @panic("TODO"),
+ .f32x4_eq => @panic("TODO"),
+ .f64x2_eq => @panic("TODO"),
+ .f32x4_ne => @panic("TODO"),
+ .f64x2_ne => @panic("TODO"),
+ .f32x4_lt => @panic("TODO"),
+ .f64x2_lt => @panic("TODO"),
+ .f32x4_gt => @panic("TODO"),
+ .f64x2_gt => @panic("TODO"),
+ .f32x4_le => @panic("TODO"),
+ .f64x2_le => @panic("TODO"),
+ .f32x4_ge => @panic("TODO"),
+ .f64x2_ge => @panic("TODO"),
+ .v128_not => @panic("TODO"),
+ .v128_and => @panic("TODO"),
+ .v128_andnot => @panic("TODO"),
+ .v128_or => @panic("TODO"),
+ .v128_xor => @panic("TODO"),
+ .v128_bitselect => @panic("TODO"),
+ .v128_any_true => @panic("TODO"),
+ .v128_load8_lane => @panic("TODO"),
+ .v128_load16_lane => @panic("TODO"),
+ .v128_load32_lane => @panic("TODO"),
+ .v128_load64_lane => @panic("TODO"),
+ .v128_store8_lane => @panic("TODO"),
+ .v128_store16_lane => @panic("TODO"),
+ .v128_store32_lane => @panic("TODO"),
+ .v128_store64_lane => @panic("TODO"),
+ .v128_load32_zero => @panic("TODO"),
+ .v128_load64_zero => @panic("TODO"),
+ .f32x4_demote_f64x2_zero => @panic("TODO"),
+ .f64x2_promote_low_f32x4 => @panic("TODO"),
+ .i8x16_abs => @panic("TODO"),
+ .i16x8_abs => @panic("TODO"),
+ .i32x4_abs => @panic("TODO"),
+ .i64x2_abs => @panic("TODO"),
+ .i8x16_neg => @panic("TODO"),
+ .i16x8_neg => @panic("TODO"),
+ .i32x4_neg => @panic("TODO"),
+ .i64x2_neg => @panic("TODO"),
+ .i8x16_popcnt => @panic("TODO"),
+ .i16x8_q15mulr_sat_s => @panic("TODO"),
+ .i8x16_all_true => @panic("TODO"),
+ .i16x8_all_true => @panic("TODO"),
+ .i32x4_all_true => @panic("TODO"),
+ .i64x2_all_true => @panic("TODO"),
+ .i8x16_bitmask => @panic("TODO"),
+ .i16x8_bitmask => @panic("TODO"),
+ .i32x4_bitmask => @panic("TODO"),
+ .i64x2_bitmask => @panic("TODO"),
+ .i8x16_narrow_i16x8_s => @panic("TODO"),
+ .i16x8_narrow_i32x4_s => @panic("TODO"),
+ .i8x16_narrow_i16x8_u => @panic("TODO"),
+ .i16x8_narrow_i32x4_u => @panic("TODO"),
+ .f32x4_ceil => @panic("TODO"),
+ .i16x8_extend_low_i8x16_s => @panic("TODO"),
+ .i32x4_extend_low_i16x8_s => @panic("TODO"),
+ .i64x2_extend_low_i32x4_s => @panic("TODO"),
+ .f32x4_floor => @panic("TODO"),
+ .i16x8_extend_high_i8x16_s => @panic("TODO"),
+ .i32x4_extend_high_i16x8_s => @panic("TODO"),
+ .i64x2_extend_high_i32x4_s => @panic("TODO"),
+ .f32x4_trunc => @panic("TODO"),
+ .i16x8_extend_low_i8x16_u => @panic("TODO"),
+ .i32x4_extend_low_i16x8_u => @panic("TODO"),
+ .i64x2_extend_low_i32x4_u => @panic("TODO"),
+ .f32x4_nearest => @panic("TODO"),
+ .i16x8_extend_high_i8x16_u => @panic("TODO"),
+ .i32x4_extend_high_i16x8_u => @panic("TODO"),
+ .i64x2_extend_high_i32x4_u => @panic("TODO"),
+ .i8x16_shl => @panic("TODO"),
+ .i16x8_shl => @panic("TODO"),
+ .i32x4_shl => @panic("TODO"),
+ .i64x2_shl => @panic("TODO"),
+ .i8x16_shr_s => @panic("TODO"),
+ .i16x8_shr_s => @panic("TODO"),
+ .i32x4_shr_s => @panic("TODO"),
+ .i64x2_shr_s => @panic("TODO"),
+ .i8x16_shr_u => @panic("TODO"),
+ .i16x8_shr_u => @panic("TODO"),
+ .i32x4_shr_u => @panic("TODO"),
+ .i64x2_shr_u => @panic("TODO"),
+ .i8x16_add => @panic("TODO"),
+ .i16x8_add => @panic("TODO"),
+ .i32x4_add => @panic("TODO"),
+ .i64x2_add => @panic("TODO"),
+ .i8x16_add_sat_s => @panic("TODO"),
+ .i16x8_add_sat_s => @panic("TODO"),
+ .i8x16_add_sat_u => @panic("TODO"),
+ .i16x8_add_sat_u => @panic("TODO"),
+ .i8x16_sub => @panic("TODO"),
+ .i16x8_sub => @panic("TODO"),
+ .i32x4_sub => @panic("TODO"),
+ .i64x2_sub => @panic("TODO"),
+ .i8x16_sub_sat_s => @panic("TODO"),
+ .i16x8_sub_sat_s => @panic("TODO"),
+ .i8x16_sub_sat_u => @panic("TODO"),
+ .i16x8_sub_sat_u => @panic("TODO"),
+ .f64x2_ceil => @panic("TODO"),
+ .f64x2_nearest => @panic("TODO"),
+ .f64x2_floor => @panic("TODO"),
+ .i16x8_mul => @panic("TODO"),
+ .i32x4_mul => @panic("TODO"),
+ .i64x2_mul => @panic("TODO"),
+ .i8x16_min_s => @panic("TODO"),
+ .i16x8_min_s => @panic("TODO"),
+ .i32x4_min_s => @panic("TODO"),
+ .i64x2_eq => @panic("TODO"),
+ .i8x16_min_u => @panic("TODO"),
+ .i16x8_min_u => @panic("TODO"),
+ .i32x4_min_u => @panic("TODO"),
+ .i64x2_ne => @panic("TODO"),
+ .i8x16_max_s => @panic("TODO"),
+ .i16x8_max_s => @panic("TODO"),
+ .i32x4_max_s => @panic("TODO"),
+ .i64x2_lt_s => @panic("TODO"),
+ .i8x16_max_u => @panic("TODO"),
+ .i16x8_max_u => @panic("TODO"),
+ .i32x4_max_u => @panic("TODO"),
+ .i64x2_gt_s => @panic("TODO"),
+ .f64x2_trunc => @panic("TODO"),
+ .i32x4_dot_i16x8_s => @panic("TODO"),
+ .i64x2_le_s => @panic("TODO"),
+ .i8x16_avgr_u => @panic("TODO"),
+ .i16x8_avgr_u => @panic("TODO"),
+ .i64x2_ge_s => @panic("TODO"),
+ .i16x8_extadd_pairwise_i8x16_s => @panic("TODO"),
+ .i16x8_extmul_low_i8x16_s => @panic("TODO"),
+ .i32x4_extmul_low_i16x8_s => @panic("TODO"),
+ .i64x2_extmul_low_i32x4_s => @panic("TODO"),
+ .i16x8_extadd_pairwise_i8x16_u => @panic("TODO"),
+ .i16x8_extmul_high_i8x16_s => @panic("TODO"),
+ .i32x4_extmul_high_i16x8_s => @panic("TODO"),
+ .i64x2_extmul_high_i32x4_s => @panic("TODO"),
+ .i32x4_extadd_pairwise_i16x8_s => @panic("TODO"),
+ .i16x8_extmul_low_i8x16_u => @panic("TODO"),
+ .i32x4_extmul_low_i16x8_u => @panic("TODO"),
+ .i64x2_extmul_low_i32x4_u => @panic("TODO"),
+ .i32x4_extadd_pairwise_i16x8_u => @panic("TODO"),
+ .i16x8_extmul_high_i8x16_u => @panic("TODO"),
+ .i32x4_extmul_high_i16x8_u => @panic("TODO"),
+ .i64x2_extmul_high_i32x4_u => @panic("TODO"),
+ .f32x4_abs => @panic("TODO"),
+ .f64x2_abs => @panic("TODO"),
+ .f32x4_neg => @panic("TODO"),
+ .f64x2_neg => @panic("TODO"),
+ .f32x4_sqrt => @panic("TODO"),
+ .f64x2_sqrt => @panic("TODO"),
+ .f32x4_add => @panic("TODO"),
+ .f64x2_add => @panic("TODO"),
+ .f32x4_sub => @panic("TODO"),
+ .f64x2_sub => @panic("TODO"),
+ .f32x4_mul => @panic("TODO"),
+ .f64x2_mul => @panic("TODO"),
+ .f32x4_div => @panic("TODO"),
+ .f64x2_div => @panic("TODO"),
+ .f32x4_min => @panic("TODO"),
+ .f64x2_min => @panic("TODO"),
+ .f32x4_max => @panic("TODO"),
+ .f64x2_max => @panic("TODO"),
+ .f32x4_pmin => @panic("TODO"),
+ .f64x2_pmin => @panic("TODO"),
+ .f32x4_pmax => @panic("TODO"),
+ .f64x2_pmax => @panic("TODO"),
+ .i32x4_trunc_sat_f32x4_s => @panic("TODO"),
+ .i32x4_trunc_sat_f32x4_u => @panic("TODO"),
+ .f32x4_convert_i32x4_s => @panic("TODO"),
+ .f32x4_convert_i32x4_u => @panic("TODO"),
+ .i32x4_trunc_sat_f64x2_s_zero => @panic("TODO"),
+ .i32x4_trunc_sat_f64x2_u_zero => @panic("TODO"),
+ .f64x2_convert_low_i32x4_s => @panic("TODO"),
+ .f64x2_convert_low_i32x4_u => @panic("TODO"),
+ .i8x16_relaxed_swizzle => @panic("TODO"),
+ .i32x4_relaxed_trunc_f32x4_s => @panic("TODO"),
+ .i32x4_relaxed_trunc_f32x4_u => @panic("TODO"),
+ .i32x4_relaxed_trunc_f64x2_s_zero => @panic("TODO"),
+ .i32x4_relaxed_trunc_f64x2_u_zero => @panic("TODO"),
+ .f32x4_relaxed_madd => @panic("TODO"),
+ .f32x4_relaxed_nmadd => @panic("TODO"),
+ .f64x2_relaxed_madd => @panic("TODO"),
+ .f64x2_relaxed_nmadd => @panic("TODO"),
+ .i8x16_relaxed_laneselect => @panic("TODO"),
+ .i16x8_relaxed_laneselect => @panic("TODO"),
+ .i32x4_relaxed_laneselect => @panic("TODO"),
+ .i64x2_relaxed_laneselect => @panic("TODO"),
+ .f32x4_relaxed_min => @panic("TODO"),
+ .f32x4_relaxed_max => @panic("TODO"),
+ .f64x2_relaxed_min => @panic("TODO"),
+ .f64x2_relaxed_max => @panic("TODO"),
+ .i16x8_relaxed_q15mulr_s => @panic("TODO"),
+ .i16x8_relaxed_dot_i8x16_i7x16_s => @panic("TODO"),
+ .i32x4_relaxed_dot_i8x16_i7x16_add_s => @panic("TODO"),
+ .f32x4_relaxed_dot_bf16x8_add_f32x4 => @panic("TODO"),
+ }
+ comptime unreachable;
+ },
+ .atomics_prefix => {
+ try code.ensureUnusedCapacity(gpa, 6 + 20);
+
+ const extra_index = datas[inst].payload;
+ const opcode = mir.extra[extra_index];
+ code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.atomics_prefix));
+ leb.writeUleb128(code.fixedWriter(), opcode) catch unreachable;
+ switch (@as(std.wasm.AtomicsOpcode, @enumFromInt(opcode))) {
+ .i32_atomic_load,
+ .i64_atomic_load,
+ .i32_atomic_load8_u,
+ .i32_atomic_load16_u,
+ .i64_atomic_load8_u,
+ .i64_atomic_load16_u,
+ .i64_atomic_load32_u,
+ .i32_atomic_store,
+ .i64_atomic_store,
+ .i32_atomic_store8,
+ .i32_atomic_store16,
+ .i64_atomic_store8,
+ .i64_atomic_store16,
+ .i64_atomic_store32,
+ .i32_atomic_rmw_add,
+ .i64_atomic_rmw_add,
+ .i32_atomic_rmw8_add_u,
+ .i32_atomic_rmw16_add_u,
+ .i64_atomic_rmw8_add_u,
+ .i64_atomic_rmw16_add_u,
+ .i64_atomic_rmw32_add_u,
+ .i32_atomic_rmw_sub,
+ .i64_atomic_rmw_sub,
+ .i32_atomic_rmw8_sub_u,
+ .i32_atomic_rmw16_sub_u,
+ .i64_atomic_rmw8_sub_u,
+ .i64_atomic_rmw16_sub_u,
+ .i64_atomic_rmw32_sub_u,
+ .i32_atomic_rmw_and,
+ .i64_atomic_rmw_and,
+ .i32_atomic_rmw8_and_u,
+ .i32_atomic_rmw16_and_u,
+ .i64_atomic_rmw8_and_u,
+ .i64_atomic_rmw16_and_u,
+ .i64_atomic_rmw32_and_u,
+ .i32_atomic_rmw_or,
+ .i64_atomic_rmw_or,
+ .i32_atomic_rmw8_or_u,
+ .i32_atomic_rmw16_or_u,
+ .i64_atomic_rmw8_or_u,
+ .i64_atomic_rmw16_or_u,
+ .i64_atomic_rmw32_or_u,
+ .i32_atomic_rmw_xor,
+ .i64_atomic_rmw_xor,
+ .i32_atomic_rmw8_xor_u,
+ .i32_atomic_rmw16_xor_u,
+ .i64_atomic_rmw8_xor_u,
+ .i64_atomic_rmw16_xor_u,
+ .i64_atomic_rmw32_xor_u,
+ .i32_atomic_rmw_xchg,
+ .i64_atomic_rmw_xchg,
+ .i32_atomic_rmw8_xchg_u,
+ .i32_atomic_rmw16_xchg_u,
+ .i64_atomic_rmw8_xchg_u,
+ .i64_atomic_rmw16_xchg_u,
+ .i64_atomic_rmw32_xchg_u,
+
+ .i32_atomic_rmw_cmpxchg,
+ .i64_atomic_rmw_cmpxchg,
+ .i32_atomic_rmw8_cmpxchg_u,
+ .i32_atomic_rmw16_cmpxchg_u,
+ .i64_atomic_rmw8_cmpxchg_u,
+ .i64_atomic_rmw16_cmpxchg_u,
+ .i64_atomic_rmw32_cmpxchg_u,
+ => {
+ const mem_arg = mir.extraData(Mir.MemArg, extra_index + 1).data;
+ encodeMemArg(code, mem_arg);
+ inst += 1;
+ continue :loop tags[inst];
+ },
+ .atomic_fence => {
+ // Hard-codes memory index 0 since the multi-memory proposal is
+ // not yet accepted nor implemented.
+ const memory_index: u32 = 0;
+ leb.writeUleb128(code.fixedWriter(), memory_index) catch unreachable;
+ inst += 1;
+ continue :loop tags[inst];
+ },
+ .memory_atomic_notify => @panic("TODO"),
+ .memory_atomic_wait32 => @panic("TODO"),
+ .memory_atomic_wait64 => @panic("TODO"),
+ }
+ comptime unreachable;
},
- else => |tag| return emit.fail("TODO: Implement atomic instruction: {s}", .{@tagName(tag)}),
}
+ comptime unreachable;
}
-fn emitMemFill(emit: *Emit) !void {
- try emit.code.append(0xFC);
- try emit.code.append(0x0B);
- // When multi-memory proposal reaches phase 4, we
- // can emit a different memory index here.
- // For now we will always emit index 0.
- try leb128.writeUleb128(emit.code.writer(), @as(u32, 0));
-}
-
-fn emitDbgLine(emit: *Emit, inst: Mir.Inst.Index) !void {
- const extra_index = emit.mir.instructions.items(.data)[inst].payload;
- const dbg_line = emit.mir.extraData(Mir.DbgLineColumn, extra_index).data;
- try emit.dbgAdvancePCAndLine(dbg_line.line, dbg_line.column);
+/// Asserts 20 unused capacity.
+fn encodeMemArg(code: *std.ArrayListUnmanaged(u8), mem_arg: Mir.MemArg) void {
+ assert(code.unusedCapacitySlice().len >= 20);
+ // Wasm encodes alignment as power of 2, rather than natural alignment.
+ const encoded_alignment = @ctz(mem_arg.alignment);
+ leb.writeUleb128(code.fixedWriter(), encoded_alignment) catch unreachable;
+ leb.writeUleb128(code.fixedWriter(), mem_arg.offset) catch unreachable;
}
-fn dbgAdvancePCAndLine(emit: *Emit, line: u32, column: u32) !void {
- if (emit.dbg_output != .dwarf) return;
+fn uavRefOffObj(wasm: *Wasm, code: *std.ArrayListUnmanaged(u8), data: Mir.UavRefOffObj, is_wasm32: bool) !void {
+ const comp = wasm.base.comp;
+ const gpa = comp.gpa;
+ const opcode: std.wasm.Opcode = if (is_wasm32) .i32_const else .i64_const;
- const delta_line = @as(i32, @intCast(line)) - @as(i32, @intCast(emit.prev_di_line));
- const delta_pc = emit.offset() - emit.prev_di_offset;
- // TODO: This must emit a relocation to calculate the offset relative
- // to the code section start.
- try emit.dbg_output.dwarf.advancePCAndLine(delta_line, delta_pc);
+ try code.ensureUnusedCapacity(gpa, 11);
+ code.appendAssumeCapacity(@intFromEnum(opcode));
- emit.prev_di_line = line;
- emit.prev_di_column = column;
- emit.prev_di_offset = emit.offset();
+ try wasm.out_relocs.append(gpa, .{
+ .offset = @intCast(code.items.len),
+ .pointee = .{ .symbol_index = try wasm.uavSymbolIndex(data.uav_obj.key(wasm).*) },
+ .tag = if (is_wasm32) .memory_addr_leb else .memory_addr_leb64,
+ .addend = data.offset,
+ });
+ code.appendNTimesAssumeCapacity(0, if (is_wasm32) 5 else 10);
}
-fn emitDbgPrologueEnd(emit: *Emit) !void {
- if (emit.dbg_output != .dwarf) return;
+fn uavRefOffExe(wasm: *Wasm, code: *std.ArrayListUnmanaged(u8), data: Mir.UavRefOffExe, is_wasm32: bool) !void {
+ const comp = wasm.base.comp;
+ const gpa = comp.gpa;
+ const opcode: std.wasm.Opcode = if (is_wasm32) .i32_const else .i64_const;
+
+ try code.ensureUnusedCapacity(gpa, 11);
+ code.appendAssumeCapacity(@intFromEnum(opcode));
- try emit.dbg_output.dwarf.setPrologueEnd();
- try emit.dbgAdvancePCAndLine(emit.prev_di_line, emit.prev_di_column);
+ const addr = wasm.uavAddr(data.uav_exe);
+ leb.writeUleb128(code.fixedWriter(), @as(u32, @intCast(@as(i64, addr) + data.offset))) catch unreachable;
}
-fn emitDbgEpilogueBegin(emit: *Emit) !void {
- if (emit.dbg_output != .dwarf) return;
+fn navRefOff(wasm: *Wasm, code: *std.ArrayListUnmanaged(u8), data: Mir.NavRefOff, is_wasm32: bool) !void {
+ const comp = wasm.base.comp;
+ const zcu = comp.zcu.?;
+ const ip = &zcu.intern_pool;
+ const gpa = comp.gpa;
+ const is_obj = comp.config.output_mode == .Obj;
+ const nav_ty = ip.getNav(data.nav_index).typeOf(ip);
+ assert(!ip.isFunctionType(nav_ty));
+
+ try code.ensureUnusedCapacity(gpa, 11);
+
+ const opcode: std.wasm.Opcode = if (is_wasm32) .i32_const else .i64_const;
+ code.appendAssumeCapacity(@intFromEnum(opcode));
+ if (is_obj) {
+ try wasm.out_relocs.append(gpa, .{
+ .offset = @intCast(code.items.len),
+ .pointee = .{ .symbol_index = try wasm.navSymbolIndex(data.nav_index) },
+ .tag = if (is_wasm32) .memory_addr_leb else .memory_addr_leb64,
+ .addend = data.offset,
+ });
+ code.appendNTimesAssumeCapacity(0, if (is_wasm32) 5 else 10);
+ } else {
+ const addr = wasm.navAddr(data.nav_index);
+ leb.writeUleb128(code.fixedWriter(), @as(u32, @intCast(@as(i64, addr) + data.offset))) catch unreachable;
+ }
+}
- try emit.dbg_output.dwarf.setEpilogueBegin();
- try emit.dbgAdvancePCAndLine(emit.prev_di_line, emit.prev_di_column);
+fn appendOutputFunctionIndex(code: *std.ArrayListUnmanaged(u8), i: Wasm.OutputFunctionIndex) void {
+ leb.writeUleb128(code.fixedWriter(), @intFromEnum(i)) catch unreachable;
}
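
The memarg encoding handled by `encodeMemArg` above is compact but easy to get wrong: wasm stores the alignment as a base-2 logarithm of the natural alignment (hence the `@ctz`), and both the alignment and the offset are ULEB128-encoded. A minimal, self-contained sketch of that encoding, using a hand-rolled ULEB128 writer so it does not depend on any particular `std.leb` API:

const std = @import("std");

// Hand-rolled ULEB128 writer for the sketch; the compiler itself uses `std.leb`.
fn writeUleb128(buf: []u8, pos: *usize, initial: u32) void {
    var value = initial;
    while (true) {
        const byte: u8 = @truncate(value & 0x7f);
        value >>= 7;
        if (value == 0) {
            buf[pos.*] = byte;
            pos.* += 1;
            return;
        }
        buf[pos.*] = byte | 0x80;
        pos.* += 1;
    }
}

test "memarg: natural alignment 8, offset 16" {
    var buf: [10]u8 = undefined;
    var len: usize = 0;
    const natural_alignment: u32 = 8;
    writeUleb128(&buf, &len, @ctz(natural_alignment)); // alignment encoded as log2 -> 3
    writeUleb128(&buf, &len, 16); // byte offset
    try std.testing.expectEqualSlices(u8, &[_]u8{ 3, 16 }, buf[0..len]);
}
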
diff --git a/src/arch/wasm/Mir.zig b/src/arch/wasm/Mir.zig
index 2d4f624b22..5c8c558926 100644
--- a/src/arch/wasm/Mir.zig
+++ b/src/arch/wasm/Mir.zig
@@ -7,11 +7,15 @@
//! and known jump labels for blocks.
const Mir = @This();
+const InternPool = @import("../../InternPool.zig");
+const Wasm = @import("../../link/Wasm.zig");
+const builtin = @import("builtin");
const std = @import("std");
+const assert = std.debug.assert;
-/// A struct of array that represents each individual wasm
-instructions: std.MultiArrayList(Inst).Slice,
+instruction_tags: []const Inst.Tag,
+instruction_datas: []const Inst.Data,
/// A slice of indexes where the meaning of the data is determined by the
/// `Inst.Tag` value.
extra: []const u32,
@@ -26,16 +30,14 @@ pub const Inst = struct {
 /// The position of a given MIR instruction within the instruction list.
pub const Index = u32;
- /// Contains all possible wasm opcodes the Zig compiler may emit
- /// Rather than re-using std.wasm.Opcode, we only declare the opcodes
- /// we need, and also use this possibility to document how to access
- /// their payload.
- ///
- /// Note: Uses its actual opcode value representation to easily convert
- /// to and from its binary representation.
+ /// Some tags match wasm opcode values to facilitate trivial lowering.
pub const Tag = enum(u8) {
- /// Uses `nop`
+ /// Uses `tag`.
@"unreachable" = 0x00,
+ /// Emits epilogue begin debug information. Marks the end of the function.
+ ///
+ /// Uses `tag` (no additional data).
+ dbg_epilogue_begin,
 /// Creates a new block that can be jumped from.
///
/// Type of the block is given in data `block_type`
@@ -44,56 +46,92 @@ pub const Inst = struct {
///
/// Type of the loop is given in data `block_type`
loop = 0x03,
+ /// Lowers to an i32_const (wasm32) or i64_const (wasm64) which is the
+ /// memory address of an unnamed constant. When emitting an object
+ /// file, this adds a relocation.
+ ///
+ /// This may not refer to a function.
+ ///
+ /// Uses `ip_index`.
+ uav_ref,
+ /// Lowers to an i32_const (wasm32) or i64_const (wasm64) which is the
+ /// memory address of an unnamed constant, offset by an integer value.
+ /// When emitting an object file, this adds a relocation.
+ ///
+ /// This may not refer to a function.
+ ///
+ /// Uses `payload` pointing to a `UavRefOff`.
+ uav_ref_off,
+ /// Lowers to an i32_const (wasm32) or i64_const (wasm64) which is the
+ /// memory address of a named constant.
+ ///
+ /// May not refer to a function.
+ ///
+ /// Uses `nav_index`.
+ nav_ref,
+ /// Lowers to an i32_const (wasm32) or i64_const (wasm64) which is the
+ /// memory address of a named constant, offset by an integer value.
+ /// When emitting an object file, this adds a relocation.
+ ///
+ /// May not refer to a function.
+ ///
+ /// Uses `payload` pointing to a `NavRefOff`.
+ nav_ref_off,
+ /// Lowers to an i32_const which is the index of the function in the
+ /// table section.
+ ///
+ /// Uses `indirect_function_table_index`.
+ func_ref,
/// Inserts debug information about the current line and column
/// of the source code
///
/// Uses `payload` of which the payload type is `DbgLineColumn`
- dbg_line = 0x06,
- /// Emits epilogue begin debug information
- ///
- /// Uses `nop`
- dbg_epilogue_begin = 0x07,
- /// Emits prologue end debug information
- ///
- /// Uses `nop`
- dbg_prologue_end = 0x08,
+ dbg_line,
+ /// Lowers to an i32_const containing the number of unique Zig error
+ /// names.
+ /// Uses `tag`.
+ errors_len,
/// Represents the end of a function body or an initialization expression
///
- /// Payload is `nop`
+ /// Uses `tag` (no additional data).
end = 0x0B,
/// Breaks from the current block to a label
///
- /// Data is `label` where index represents the label to jump to
+ /// Uses `label` where index represents the label to jump to
br = 0x0C,
/// Breaks from the current block if the stack value is non-zero
///
- /// Data is `label` where index represents the label to jump to
+ /// Uses `label` where index represents the label to jump to
br_if = 0x0D,
/// Jump table that takes the stack value as an index where each value
/// represents the label to jump to.
///
/// Data is extra of which the Payload's type is `JumpTable`
- br_table = 0x0E,
+ br_table,
/// Returns from the function
///
- /// Uses `nop`
+ /// Uses `tag`.
@"return" = 0x0F,
- /// Calls a function by its index
- ///
- /// Uses `label`
- call = 0x10,
+ /// Lowers to an i32_const (wasm32) or i64_const (wasm64) containing
+ /// the base address of the table of error code names, with each
+ /// element being a null-terminated slice.
+ ///
+ /// Uses `tag`.
+ error_name_table_ref,
+ /// Calls a function using `nav_index`.
+ call_nav,
/// Calls a function pointer by its function signature
/// and index into the function table.
///
- /// Uses `label`
- call_indirect = 0x11,
- /// Contains a symbol to a function pointer
- /// uses `label`
- ///
- /// Note: This uses `0x16` as value which is reserved by the WebAssembly
- /// specification but unused, meaning we must update this if the specification were to
- /// use this value.
- function_index = 0x16,
+ /// Uses `func_ty`
+ call_indirect,
+ /// Calls a function by its index.
+ ///
+ /// The function is the auto-generated tag name function for the type
+ /// provided in `ip_index`.
+ call_tag_name,
+ /// Lowers to a `call` instruction, using `intrinsic`.
+ call_intrinsic,
/// Pops three values from the stack and pushes
/// the first or second value dependent on the third value.
/// Uses `tag`
@@ -112,15 +150,11 @@ pub const Inst = struct {
///
/// Uses `label`
local_tee = 0x22,
- /// Loads a (mutable) global at given index onto the stack
- ///
- /// Uses `label`
- global_get = 0x23,
- /// Pops a value from the stack and sets the global at given index.
- /// Note: Both types must be equal and global must be marked mutable.
+ /// Pops a value from the stack and sets the stack pointer global.
+ /// The value must be the same type as the stack pointer global.
///
- /// Uses `label`.
- global_set = 0x24,
+ /// Uses `tag` (no additional data).
+ global_set_sp,
/// Loads a 32-bit integer from memory (data section) onto the stack
/// Pops the value from the stack which represents the offset into memory.
///
@@ -256,19 +290,19 @@ pub const Inst = struct {
/// Loads a 32-bit signed immediate value onto the stack
///
/// Uses `imm32`
- i32_const = 0x41,
+ i32_const,
 /// Loads a 64-bit signed immediate value onto the stack
 ///
 /// Uses `payload` of type `Imm64`
- i64_const = 0x42,
+ i64_const,
/// Loads a 32-bit float value onto the stack.
///
/// Uses `float32`
- f32_const = 0x43,
+ f32_const,
/// Loads a 64-bit float value onto the stack.
///
/// Uses `payload` of type `Float64`
- f64_const = 0x44,
+ f64_const,
/// Uses `tag`
i32_eqz = 0x45,
/// Uses `tag`
@@ -522,25 +556,19 @@ pub const Inst = struct {
///
/// The `data` field depends on the extension instruction and
/// may contain additional data.
- misc_prefix = 0xFC,
+ misc_prefix,
/// The instruction consists of a simd opcode.
/// The actual simd-opcode is found at payload's index.
///
/// The `data` field depends on the simd instruction and
/// may contain additional data.
- simd_prefix = 0xFD,
+ simd_prefix,
/// The instruction consists of an atomics opcode.
/// The actual atomics-opcode is found at payload's index.
///
/// The `data` field depends on the atomics instruction and
/// may contain additional data.
atomics_prefix = 0xFE,
- /// Contains a symbol to a memory address
- /// Uses `label`
- ///
- /// Note: This uses `0xFF` as value as it is unused and not reserved
- /// by the wasm specification, making it safe to use.
- memory_address = 0xFF,
/// From a given wasm opcode, returns a MIR tag.
pub fn fromOpcode(opcode: std.wasm.Opcode) Tag {
@@ -560,26 +588,41 @@ pub const Inst = struct {
/// Uses no additional data
tag: void,
/// Contains the result type of a block
- ///
- /// Used by `block` and `loop`
- block_type: u8,
- /// Contains an u32 index into a wasm section entry, such as a local.
- /// Note: This is not an index to another instruction.
- ///
- /// Used by e.g. `local_get`, `local_set`, etc.
+ block_type: std.wasm.BlockType,
+ /// Label: Each structured control instruction introduces an implicit label.
+ /// Labels are targets for branch instructions that reference them with
+ /// label indices. Unlike with other index spaces, indexing of labels
+ /// is relative by nesting depth, that is, label 0 refers to the
+ /// innermost structured control instruction enclosing the referring
+ /// branch instruction, while increasing indices refer to those farther
+ /// out. Consequently, labels can only be referenced from within the
+ /// associated structured control instruction.
label: u32,
+ /// Local: The index space for locals is only accessible inside a function and
+ /// includes the parameters of that function, which precede the local
+ /// variables.
+ local: u32,
/// A 32-bit immediate value.
- ///
- /// Used by `i32_const`
imm32: i32,
/// A 32-bit float value
- ///
- /// Used by `f32_float`
float32: f32,
/// Index into `extra`. Meaning of what can be found there is context-dependent.
- ///
- /// Used by e.g. `br_table`
payload: u32,
+
+ ip_index: InternPool.Index,
+ nav_index: InternPool.Nav.Index,
+ func_ty: Wasm.FunctionType.Index,
+ intrinsic: Intrinsic,
+ uav_obj: Wasm.UavsObjIndex,
+ uav_exe: Wasm.UavsExeIndex,
+ indirect_function_table_index: Wasm.ZcuIndirectFunctionSetIndex,
+
+ comptime {
+ switch (builtin.mode) {
+ .Debug, .ReleaseSafe => {},
+ .ReleaseFast, .ReleaseSmall => assert(@sizeOf(Data) == 4),
+ }
+ }
};
};
@@ -596,6 +639,11 @@ pub fn extraData(self: *const Mir, comptime T: type, index: usize) struct { data
inline for (fields) |field| {
@field(result, field.name) = switch (field.type) {
u32 => self.extra[i],
+ i32 => @bitCast(self.extra[i]),
+ Wasm.UavsObjIndex,
+ Wasm.UavsExeIndex,
+ InternPool.Nav.Index,
+ => @enumFromInt(self.extra[i]),
else => |field_type| @compileError("Unsupported field type " ++ @typeName(field_type)),
};
i += 1;
@@ -609,28 +657,19 @@ pub const JumpTable = struct {
length: u32,
};
-/// Stores an unsigned 64bit integer
-/// into a 32bit most significant bits field
-/// and a 32bit least significant bits field.
-///
-/// This uses an unsigned integer rather than a signed integer
-/// as we can easily store those into `extra`
pub const Imm64 = struct {
msb: u32,
lsb: u32,
- pub fn fromU64(imm: u64) Imm64 {
+ pub fn init(full: u64) Imm64 {
return .{
- .msb = @as(u32, @truncate(imm >> 32)),
- .lsb = @as(u32, @truncate(imm)),
+ .msb = @truncate(full >> 32),
+ .lsb = @truncate(full),
};
}
- pub fn toU64(self: Imm64) u64 {
- var result: u64 = 0;
- result |= @as(u64, self.msb) << 32;
- result |= @as(u64, self.lsb);
- return result;
+ pub fn toInt(i: Imm64) u64 {
+ return (@as(u64, i.msb) << 32) | @as(u64, i.lsb);
}
};
@@ -638,23 +677,16 @@ pub const Float64 = struct {
msb: u32,
lsb: u32,
- pub fn fromFloat64(float: f64) Float64 {
- const tmp = @as(u64, @bitCast(float));
+ pub fn init(f: f64) Float64 {
+ const int: u64 = @bitCast(f);
return .{
- .msb = @as(u32, @truncate(tmp >> 32)),
- .lsb = @as(u32, @truncate(tmp)),
+ .msb = @truncate(int >> 32),
+ .lsb = @truncate(int),
};
}
- pub fn toF64(self: Float64) f64 {
- @as(f64, @bitCast(self.toU64()));
- }
-
- pub fn toU64(self: Float64) u64 {
- var result: u64 = 0;
- result |= @as(u64, self.msb) << 32;
- result |= @as(u64, self.lsb);
- return result;
+ pub fn toInt(f: Float64) u64 {
+ return (@as(u64, f.msb) << 32) | @as(u64, f.lsb);
}
};
@@ -663,11 +695,19 @@ pub const MemArg = struct {
alignment: u32,
};
-/// Represents a memory address, which holds both the pointer
-/// or the parent pointer and the offset to it.
-pub const Memory = struct {
- pointer: u32,
- offset: u32,
+pub const UavRefOffObj = struct {
+ uav_obj: Wasm.UavsObjIndex,
+ offset: i32,
+};
+
+pub const UavRefOffExe = struct {
+ uav_exe: Wasm.UavsExeIndex,
+ offset: i32,
+};
+
+pub const NavRefOff = struct {
+ nav_index: InternPool.Nav.Index,
+ offset: i32,
};
/// Maps a source line with wasm bytecode
@@ -675,3 +715,199 @@ pub const DbgLineColumn = struct {
line: u32,
column: u32,
};
+
+/// Tag names exactly match the corresponding symbol names.
+pub const Intrinsic = enum(u32) {
+ __addhf3,
+ __addtf3,
+ __addxf3,
+ __ashlti3,
+ __ashrti3,
+ __bitreversedi2,
+ __bitreversesi2,
+ __bswapdi2,
+ __bswapsi2,
+ __ceilh,
+ __ceilx,
+ __cosh,
+ __cosx,
+ __divhf3,
+ __divtf3,
+ __divti3,
+ __divxf3,
+ __eqtf2,
+ __eqxf2,
+ __exp2h,
+ __exp2x,
+ __exph,
+ __expx,
+ __extenddftf2,
+ __extenddfxf2,
+ __extendhfsf2,
+ __extendhftf2,
+ __extendhfxf2,
+ __extendsftf2,
+ __extendsfxf2,
+ __extendxftf2,
+ __fabsh,
+ __fabsx,
+ __fixdfdi,
+ __fixdfsi,
+ __fixdfti,
+ __fixhfdi,
+ __fixhfsi,
+ __fixhfti,
+ __fixsfdi,
+ __fixsfsi,
+ __fixsfti,
+ __fixtfdi,
+ __fixtfsi,
+ __fixtfti,
+ __fixunsdfdi,
+ __fixunsdfsi,
+ __fixunsdfti,
+ __fixunshfdi,
+ __fixunshfsi,
+ __fixunshfti,
+ __fixunssfdi,
+ __fixunssfsi,
+ __fixunssfti,
+ __fixunstfdi,
+ __fixunstfsi,
+ __fixunstfti,
+ __fixunsxfdi,
+ __fixunsxfsi,
+ __fixunsxfti,
+ __fixxfdi,
+ __fixxfsi,
+ __fixxfti,
+ __floatdidf,
+ __floatdihf,
+ __floatdisf,
+ __floatditf,
+ __floatdixf,
+ __floatsidf,
+ __floatsihf,
+ __floatsisf,
+ __floatsitf,
+ __floatsixf,
+ __floattidf,
+ __floattihf,
+ __floattisf,
+ __floattitf,
+ __floattixf,
+ __floatundidf,
+ __floatundihf,
+ __floatundisf,
+ __floatunditf,
+ __floatundixf,
+ __floatunsidf,
+ __floatunsihf,
+ __floatunsisf,
+ __floatunsitf,
+ __floatunsixf,
+ __floatuntidf,
+ __floatuntihf,
+ __floatuntisf,
+ __floatuntitf,
+ __floatuntixf,
+ __floorh,
+ __floorx,
+ __fmah,
+ __fmax,
+ __fmaxh,
+ __fmaxx,
+ __fminh,
+ __fminx,
+ __fmodh,
+ __fmodx,
+ __getf2,
+ __gexf2,
+ __gttf2,
+ __gtxf2,
+ __letf2,
+ __lexf2,
+ __log10h,
+ __log10x,
+ __log2h,
+ __log2x,
+ __logh,
+ __logx,
+ __lshrti3,
+ __lttf2,
+ __ltxf2,
+ __modti3,
+ __mulhf3,
+ __mulodi4,
+ __muloti4,
+ __multf3,
+ __multi3,
+ __mulxf3,
+ __netf2,
+ __nexf2,
+ __roundh,
+ __roundx,
+ __sinh,
+ __sinx,
+ __sqrth,
+ __sqrtx,
+ __subhf3,
+ __subtf3,
+ __subxf3,
+ __tanh,
+ __tanx,
+ __trunch,
+ __truncsfhf2,
+ __trunctfdf2,
+ __trunctfhf2,
+ __trunctfsf2,
+ __trunctfxf2,
+ __truncx,
+ __truncxfdf2,
+ __truncxfhf2,
+ __truncxfsf2,
+ __udivti3,
+ __umodti3,
+ ceilq,
+ cos,
+ cosf,
+ cosq,
+ exp,
+ exp2,
+ exp2f,
+ exp2q,
+ expf,
+ expq,
+ fabsq,
+ floorq,
+ fma,
+ fmaf,
+ fmaq,
+ fmax,
+ fmaxf,
+ fmaxq,
+ fmin,
+ fminf,
+ fminq,
+ fmod,
+ fmodf,
+ fmodq,
+ log,
+ log10,
+ log10f,
+ log10q,
+ log2,
+ log2f,
+ log2q,
+ logf,
+ logq,
+ roundq,
+ sin,
+ sinf,
+ sinq,
+ sqrtq,
+ tan,
+ tanf,
+ tanq,
+ truncq,
+};
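
Since `extra` is a slice of `u32`, 64-bit payloads such as `Imm64` and `Float64` above are stored as two 32-bit halves (`msb`/`lsb`) and reassembled at emit time via `toInt`. A minimal sketch of that round trip through a 32-bit extra array (illustrative only, not the compiler's `extraData` helper):

const std = @import("std");

test "64-bit immediate stored as two u32 words" {
    const value: u64 = 0x1234_5678_9abc_def0;

    // Store: split into most/least significant halves, as in `Imm64.init`.
    const extra = [2]u32{ @truncate(value >> 32), @truncate(value) };

    // Load: reassemble on emit, as in `Imm64.toInt`.
    const loaded = (@as(u64, extra[0]) << 32) | @as(u64, extra[1]);
    try std.testing.expectEqual(value, loaded);
}
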
diff --git a/src/arch/wasm/abi.zig b/src/arch/wasm/abi.zig
index 5a1d2fdb0b..d7ca4cf715 100644
--- a/src/arch/wasm/abi.zig
+++ b/src/arch/wasm/abi.zig
@@ -22,7 +22,7 @@ const direct: [2]Class = .{ .direct, .none };
 /// Classifies a given Zig type to determine how values of that type must be passed
 /// or returned within a wasm function.
/// When all elements result in `.none`, no value must be passed in or returned.
-pub fn classifyType(ty: Type, zcu: *Zcu) [2]Class {
+pub fn classifyType(ty: Type, zcu: *const Zcu) [2]Class {
const ip = &zcu.intern_pool;
const target = zcu.getTarget();
if (!ty.hasRuntimeBitsIgnoreComptime(zcu)) return none;
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index 298b2e11e0..940cd00195 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -19,7 +19,6 @@ const Allocator = mem.Allocator;
const CodeGenError = codegen.CodeGenError;
const Compilation = @import("../../Compilation.zig");
const ErrorMsg = Zcu.ErrorMsg;
-const Result = codegen.Result;
const Emit = @import("Emit.zig");
const Liveness = @import("../../Liveness.zig");
const Lower = @import("Lower.zig");
@@ -59,7 +58,6 @@ target: *const std.Target,
owner: Owner,
inline_func: InternPool.Index,
mod: *Package.Module,
-err_msg: ?*ErrorMsg,
arg_index: u32,
args: []MCValue,
va_info: union {
@@ -819,9 +817,9 @@ pub fn generate(
func_index: InternPool.Index,
air: Air,
liveness: Liveness,
- code: *std.ArrayList(u8),
+ code: *std.ArrayListUnmanaged(u8),
debug_output: link.File.DebugInfoOutput,
-) CodeGenError!Result {
+) CodeGenError!void {
const zcu = pt.zcu;
const comp = zcu.comp;
const gpa = zcu.gpa;
@@ -841,7 +839,6 @@ pub fn generate(
.debug_output = debug_output,
.owner = .{ .nav_index = func.owner_nav },
.inline_func = func_index,
- .err_msg = null,
.arg_index = undefined,
.args = undefined, // populated after `resolveCallingConventionValues`
.va_info = undefined, // populated after `resolveCallingConventionValues`
@@ -881,15 +878,7 @@ pub fn generate(
const fn_info = zcu.typeToFunc(fn_type).?;
const cc = abi.resolveCallingConvention(fn_info.cc, function.target.*);
var call_info = function.resolveCallingConventionValues(fn_info, &.{}, .args_frame) catch |err| switch (err) {
- error.CodegenFail => return Result{ .fail = function.err_msg.? },
- error.OutOfRegisters => return Result{
- .fail = try ErrorMsg.create(
- gpa,
- src_loc,
- "CodeGen ran out of registers. This is a bug in the Zig compiler.",
- .{},
- ),
- },
+ error.CodegenFail => return error.CodegenFail,
else => |e| return e,
};
defer call_info.deinit(&function);
@@ -926,10 +915,8 @@ pub fn generate(
};
function.gen() catch |err| switch (err) {
- error.CodegenFail => return Result{ .fail = function.err_msg.? },
- error.OutOfRegisters => return Result{
- .fail = try ErrorMsg.create(gpa, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}),
- },
+ error.CodegenFail => return error.CodegenFail,
+ error.OutOfRegisters => return function.fail("ran out of registers (Zig compiler bug)", .{}),
else => |e| return e,
};
@@ -953,10 +940,7 @@ pub fn generate(
.pic = mod.pic,
},
.atom_index = function.owner.getSymbolIndex(&function) catch |err| switch (err) {
- error.CodegenFail => return Result{ .fail = function.err_msg.? },
- error.OutOfRegisters => return Result{
- .fail = try ErrorMsg.create(gpa, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}),
- },
+ error.CodegenFail => return error.CodegenFail,
else => |e| return e,
},
.debug_output = debug_output,
@@ -974,29 +958,11 @@ pub fn generate(
};
defer emit.deinit();
emit.emitMir() catch |err| switch (err) {
- error.LowerFail, error.EmitFail => return Result{ .fail = emit.lower.err_msg.? },
- error.InvalidInstruction, error.CannotEncode => |e| {
- const msg = switch (e) {
- error.InvalidInstruction => "CodeGen failed to find a viable instruction.",
- error.CannotEncode => "CodeGen failed to encode the instruction.",
- };
- return Result{
- .fail = try ErrorMsg.create(
- gpa,
- src_loc,
- "{s} This is a bug in the Zig compiler.",
- .{msg},
- ),
- };
- },
- else => |e| return e,
- };
+ error.LowerFail, error.EmitFail => return function.failMsg(emit.lower.err_msg.?),
- if (function.err_msg) |em| {
- return Result{ .fail = em };
- } else {
- return Result.ok;
- }
+ error.InvalidInstruction, error.CannotEncode => |e| return function.fail("emit MIR failed: {s} (Zig compiler bug)", .{@errorName(e)}),
+ else => |e| return function.fail("emit MIR failed: {s}", .{@errorName(e)}),
+ };
}
pub fn generateLazy(
@@ -1004,9 +970,9 @@ pub fn generateLazy(
pt: Zcu.PerThread,
src_loc: Zcu.LazySrcLoc,
lazy_sym: link.File.LazySymbol,
- code: *std.ArrayList(u8),
+ code: *std.ArrayListUnmanaged(u8),
debug_output: link.File.DebugInfoOutput,
-) CodeGenError!Result {
+) CodeGenError!void {
const comp = bin_file.comp;
const gpa = comp.gpa;
// This function is for generating global code, so we use the root module.
@@ -1022,7 +988,6 @@ pub fn generateLazy(
.debug_output = debug_output,
.owner = .{ .lazy_sym = lazy_sym },
.inline_func = undefined,
- .err_msg = null,
.arg_index = undefined,
.args = undefined,
.va_info = undefined,
@@ -1038,10 +1003,8 @@ pub fn generateLazy(
}
function.genLazy(lazy_sym) catch |err| switch (err) {
- error.CodegenFail => return Result{ .fail = function.err_msg.? },
- error.OutOfRegisters => return Result{
- .fail = try ErrorMsg.create(gpa, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}),
- },
+ error.CodegenFail => return error.CodegenFail,
+ error.OutOfRegisters => return function.fail("ran out of registers (Zig compiler bug)", .{}),
else => |e| return e,
};
@@ -1065,10 +1028,7 @@ pub fn generateLazy(
.pic = mod.pic,
},
.atom_index = function.owner.getSymbolIndex(&function) catch |err| switch (err) {
- error.CodegenFail => return Result{ .fail = function.err_msg.? },
- error.OutOfRegisters => return Result{
- .fail = try ErrorMsg.create(gpa, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}),
- },
+ error.CodegenFail => return error.CodegenFail,
else => |e| return e,
},
.debug_output = debug_output,
@@ -1078,29 +1038,11 @@ pub fn generateLazy(
};
defer emit.deinit();
emit.emitMir() catch |err| switch (err) {
- error.LowerFail, error.EmitFail => return Result{ .fail = emit.lower.err_msg.? },
- error.InvalidInstruction, error.CannotEncode => |e| {
- const msg = switch (e) {
- error.InvalidInstruction => "CodeGen failed to find a viable instruction.",
- error.CannotEncode => "CodeGen failed to encode the instruction.",
- };
- return Result{
- .fail = try ErrorMsg.create(
- gpa,
- src_loc,
- "{s} This is a bug in the Zig compiler.",
- .{msg},
- ),
- };
- },
- else => |e| return e,
+ error.LowerFail, error.EmitFail => return function.failMsg(emit.lower.err_msg.?),
+ error.InvalidInstruction => return function.fail("failed to find a viable x86 instruction (Zig compiler bug)", .{}),
+ error.CannotEncode => return function.fail("failed to encode x86 instruction (Zig compiler bug)", .{}),
+ else => |e| return function.fail("failed to emit MIR: {s}", .{@errorName(e)}),
};
-
- if (function.err_msg) |em| {
- return Result{ .fail = em };
- } else {
- return Result.ok;
- }
}
const FormatNavData = struct {
@@ -19276,10 +19218,7 @@ fn genTypedValue(self: *Self, val: Value) InnerError!MCValue {
.load_got => |sym_index| .{ .lea_got = sym_index },
.load_tlv => |sym_index| .{ .lea_tlv = sym_index },
},
- .fail => |msg| {
- self.err_msg = msg;
- return error.CodegenFail;
- },
+ .fail => |msg| return self.failMsg(msg),
};
}
@@ -19592,11 +19531,23 @@ fn resolveCallingConventionValues(
return result;
}
-fn fail(self: *Self, comptime format: []const u8, args: anytype) InnerError {
+fn fail(self: *Self, comptime format: []const u8, args: anytype) error{ OutOfMemory, CodegenFail } {
@branchHint(.cold);
- assert(self.err_msg == null);
- const gpa = self.gpa;
- self.err_msg = try ErrorMsg.create(gpa, self.src_loc, format, args);
+ const zcu = self.pt.zcu;
+ switch (self.owner) {
+ .nav_index => |i| return zcu.codegenFail(i, format, args),
+ .lazy_sym => |s| return zcu.codegenFailType(s.ty, format, args),
+ }
+ return error.CodegenFail;
+}
+
+fn failMsg(self: *Self, msg: *ErrorMsg) error{ OutOfMemory, CodegenFail } {
+ @branchHint(.cold);
+ const zcu = self.pt.zcu;
+ switch (self.owner) {
+ .nav_index => |i| return zcu.codegenFailMsg(i, msg),
+ .lazy_sym => |s| return zcu.codegenFailTypeMsg(s.ty, msg),
+ }
return error.CodegenFail;
}
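
The net effect of this refactor is that `generate` no longer threads an `err_msg` field through the codegen struct: the error text is recorded on the `Zcu` (via `codegenFail`/`codegenFailMsg`, keyed by the nav or the lazy symbol's type) and the function simply returns `error.CodegenFail`. A toy sketch of that shape, with a hypothetical `Diagnostics` store standing in for the `Zcu`; the names here are illustrative, not the compiler's API:

const std = @import("std");

// Hypothetical stand-in for the Zcu-side diagnostic storage.
const Diagnostics = struct {
    gpa: std.mem.Allocator,
    messages: std.ArrayListUnmanaged([]u8) = .{},

    fn fail(
        d: *Diagnostics,
        comptime format: []const u8,
        args: anytype,
    ) error{ OutOfMemory, CodegenFail } {
        const msg = try std.fmt.allocPrint(d.gpa, format, args);
        try d.messages.append(d.gpa, msg);
        return error.CodegenFail;
    }
};

fn generate(d: *Diagnostics) error{ OutOfMemory, CodegenFail }!void {
    // Callers no longer inspect a Result union; they only see the error code.
    return d.fail("ran out of registers (Zig compiler bug)", .{});
}

test "failure is reported through the shared store" {
    var d: Diagnostics = .{ .gpa = std.testing.allocator };
    defer {
        for (d.messages.items) |m| d.gpa.free(m);
        d.messages.deinit(d.gpa);
    }
    try std.testing.expectError(error.CodegenFail, generate(&d));
    try std.testing.expectEqual(@as(usize, 1), d.messages.items.len);
}
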
diff --git a/src/arch/x86_64/Emit.zig b/src/arch/x86_64/Emit.zig
index f744eb3fc4..6e0d75f883 100644
--- a/src/arch/x86_64/Emit.zig
+++ b/src/arch/x86_64/Emit.zig
@@ -4,7 +4,7 @@ air: Air,
lower: Lower,
atom_index: u32,
debug_output: link.File.DebugInfoOutput,
-code: *std.ArrayList(u8),
+code: *std.ArrayListUnmanaged(u8),
prev_di_loc: Loc,
/// Relative to the beginning of `code`.
@@ -18,6 +18,7 @@ pub const Error = Lower.Error || error{
} || link.File.UpdateDebugInfoError;
pub fn emitMir(emit: *Emit) Error!void {
+ const gpa = emit.lower.bin_file.comp.gpa;
for (0..emit.lower.mir.instructions.len) |mir_i| {
const mir_index: Mir.Inst.Index = @intCast(mir_i);
try emit.code_offset_mapping.putNoClobber(
@@ -82,7 +83,7 @@ pub fn emitMir(emit: *Emit) Error!void {
}
continue;
}
- try lowered_inst.encode(emit.code.writer(), .{});
+ try lowered_inst.encode(emit.code.writer(gpa), .{});
const end_offset: u32 = @intCast(emit.code.items.len);
while (lowered_relocs.len > 0 and
lowered_relocs[0].lowered_inst_index == lowered_index) : ({
@@ -100,7 +101,7 @@ pub fn emitMir(emit: *Emit) Error!void {
const zo = elf_file.zigObjectPtr().?;
const atom_ptr = zo.symbol(emit.atom_index).atom(elf_file).?;
const r_type = @intFromEnum(std.elf.R_X86_64.PLT32);
- try atom_ptr.addReloc(elf_file.base.comp.gpa, .{
+ try atom_ptr.addReloc(gpa, .{
.r_offset = end_offset - 4,
.r_info = (@as(u64, @intCast(sym_index)) << 32) | r_type,
.r_addend = lowered_relocs[0].off - 4,
@@ -147,7 +148,7 @@ pub fn emitMir(emit: *Emit) Error!void {
const zo = elf_file.zigObjectPtr().?;
const atom = zo.symbol(emit.atom_index).atom(elf_file).?;
const r_type = @intFromEnum(std.elf.R_X86_64.TLSLD);
- try atom.addReloc(elf_file.base.comp.gpa, .{
+ try atom.addReloc(gpa, .{
.r_offset = end_offset - 4,
.r_info = (@as(u64, @intCast(sym_index)) << 32) | r_type,
.r_addend = lowered_relocs[0].off - 4,
@@ -158,7 +159,7 @@ pub fn emitMir(emit: *Emit) Error!void {
const zo = elf_file.zigObjectPtr().?;
const atom = zo.symbol(emit.atom_index).atom(elf_file).?;
const r_type = @intFromEnum(std.elf.R_X86_64.DTPOFF32);
- try atom.addReloc(elf_file.base.comp.gpa, .{
+ try atom.addReloc(gpa, .{
.r_offset = end_offset - 4,
.r_info = (@as(u64, @intCast(sym_index)) << 32) | r_type,
.r_addend = lowered_relocs[0].off,
@@ -173,7 +174,7 @@ pub fn emitMir(emit: *Emit) Error!void {
@intFromEnum(std.elf.R_X86_64.GOTPCREL)
else
@intFromEnum(std.elf.R_X86_64.PC32);
- try atom.addReloc(elf_file.base.comp.gpa, .{
+ try atom.addReloc(gpa, .{
.r_offset = end_offset - 4,
.r_info = (@as(u64, @intCast(sym_index)) << 32) | r_type,
.r_addend = lowered_relocs[0].off - 4,
@@ -183,7 +184,7 @@ pub fn emitMir(emit: *Emit) Error!void {
@intFromEnum(std.elf.R_X86_64.TPOFF32)
else
@intFromEnum(std.elf.R_X86_64.@"32");
- try atom.addReloc(elf_file.base.comp.gpa, .{
+ try atom.addReloc(gpa, .{
.r_offset = end_offset - 4,
.r_info = (@as(u64, @intCast(sym_index)) << 32) | r_type,
.r_addend = lowered_relocs[0].off,
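
All of the relocation records above construct `r_info` the same way: ELF64 packs the symbol table index into the upper 32 bits and the relocation type into the lower 32 bits, which is exactly what the `(sym_index << 32) | r_type` expressions compute. A small sketch of that packing and how the two fields are recovered:

const std = @import("std");

// ELF64_R_INFO: symbol index in the high 32 bits, relocation type in the low 32 bits.
fn elf64RInfo(sym_index: u32, r_type: u32) u64 {
    return (@as(u64, sym_index) << 32) | r_type;
}

test "ELF64 r_info packs symbol index and relocation type" {
    const r_plt32: u32 = @intFromEnum(std.elf.R_X86_64.PLT32);
    const r_info = elf64RInfo(7, r_plt32);
    try std.testing.expectEqual(@as(u64, 7), r_info >> 32); // ELF64_R_SYM
    try std.testing.expectEqual(@as(u64, r_plt32), r_info & 0xffff_ffff); // ELF64_R_TYPE
}
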