path: root/src/arch/wasm/CodeGen.zig
author    Andrew Kelley <andrew@ziglang.org>  2024-12-05 23:46:46 -0800
committer Andrew Kelley <andrew@ziglang.org>  2025-01-15 15:11:35 -0800
commit    e521879e4730fd85a92081c0040db7dc5daad8a3 (patch)
tree      be07a1a575ad2728027846e0b8cce0f50b807cf0 /src/arch/wasm/CodeGen.zig
parent    b9355edfb1db042098bc232cf8e52e079f4fcf4e (diff)
download  zig-e521879e4730fd85a92081c0040db7dc5daad8a3.tar.gz
          zig-e521879e4730fd85a92081c0040db7dc5daad8a3.zip
rewrite wasm/Emit.zig
Mainly, rework how relocations work. This is the point at which symbol indexes are known - not before. And don't emit unnecessary relocations! They're only needed when emitting an object file. Changes the wasm linker to keep MIR around long-lived so that fixups can be reapplied after linker garbage collection. Use a labeled switch while we're at it.
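
For context, the labeled switch mentioned above is the Zig construct where `continue :label operand` re-enters the switch with a new operand, so a dispatch loop needs no explicit `while (true)`. A minimal, hypothetical sketch of the pattern (illustrative only; names invented, not code from this commit):

const std = @import("std");

// Labeled-switch dispatch loop: `continue :loop value` jumps back to the
// switch with a new operand. Assumes `bytes` ends with a 0 sentinel.
fn sumUntilZero(bytes: []const u8) u32 {
    var i: usize = 0;
    var sum: u32 = 0;
    loop: switch (bytes[i]) {
        0 => return sum,
        else => |b| {
            sum += b;
            i += 1;
            continue :loop bytes[i];
        },
    }
}

test sumUntilZero {
    try std.testing.expectEqual(@as(u32, 6), sumUntilZero(&[_]u8{ 1, 2, 3, 0 }));
}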
Diffstat (limited to 'src/arch/wasm/CodeGen.zig')
-rw-r--r--  src/arch/wasm/CodeGen.zig  698
1 file changed, 366 insertions, 332 deletions
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index f7a0c44d3c..161a4d0cbc 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -7,6 +7,7 @@ const leb = std.leb;
const mem = std.mem;
const log = std.log.scoped(.codegen);
+const CodeGen = @This();
const codegen = @import("../../codegen.zig");
const Zcu = @import("../../Zcu.zig");
const InternPool = @import("../../InternPool.zig");
@@ -24,6 +25,98 @@ const abi = @import("abi.zig");
const Alignment = InternPool.Alignment;
const errUnionPayloadOffset = codegen.errUnionPayloadOffset;
const errUnionErrorOffset = codegen.errUnionErrorOffset;
+const Wasm = link.File.Wasm;
+
+/// Reference to the function declaration the code
+/// section belongs to
+owner_nav: InternPool.Nav.Index,
+/// Current block depth. Used to calculate the relative difference between a break
+/// and block
+block_depth: u32 = 0,
+air: Air,
+liveness: Liveness,
+gpa: mem.Allocator,
+func_index: InternPool.Index,
+/// Contains a list of current branches.
+/// When we return from a branch, the branch will be popped from this list,
+/// which means branches can only contain references from within its own branch,
+/// or a branch higher (lower index) in the tree.
+branches: std.ArrayListUnmanaged(Branch) = .empty,
+/// Table to save `WValue`'s generated by an `Air.Inst`
+// values: ValueTable,
+/// Mapping from Air.Inst.Index to block ids
+blocks: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, struct {
+ label: u32,
+ value: WValue,
+}) = .{},
+/// Maps `loop` instructions to their label. `br` to here repeats the loop.
+loops: std.AutoHashMapUnmanaged(Air.Inst.Index, u32) = .empty,
+/// The index the next local generated will have
+/// NOTE: arguments share the index with locals therefore the first variable
+/// will have the index that comes after the last argument's index
+local_index: u32,
+/// The index of the current argument.
+/// Used to track which argument is being referenced in `airArg`.
+arg_index: u32 = 0,
+/// List of simd128 immediates. Each value is stored as an array of bytes.
+/// This list will only be populated for 128bit-simd values when the target features
+/// are enabled also.
+simd_immediates: std.ArrayListUnmanaged([16]u8) = .empty,
+/// The Target we're emitting (used to call intInfo)
+target: *const std.Target,
+wasm: *link.File.Wasm,
+pt: Zcu.PerThread,
+/// List of MIR Instructions
+mir_instructions: *std.MultiArrayList(Mir.Inst),
+/// Contains extra data for MIR
+mir_extra: *std.ArrayListUnmanaged(u32),
+/// List of all locals' types generated throughout this declaration
+/// used to emit locals count at start of 'code' section.
+locals: *std.ArrayListUnmanaged(u8),
+/// When a function is executing, we store the current stack pointer's value within this local.
+/// This value is then used to restore the stack pointer to the original value at the return of the function.
+initial_stack_value: WValue = .none,
+/// The current stack pointer subtracted with the stack size. From this value, we will calculate
+/// all offsets of the stack values.
+bottom_stack_value: WValue = .none,
+/// Arguments of this function declaration
+/// This will be set after `resolveCallingConventionValues`
+args: []WValue,
+/// This will only be `.none` if the function returns void, or returns an immediate.
+/// When it returns a pointer to the stack, the `.local` tag will be active and must be populated
+/// before this function returns its execution to the caller.
+return_value: WValue,
+/// The size of the stack this function occupies. In the function prologue
+/// we will move the stack pointer by this number, forward aligned with the `stack_alignment`.
+stack_size: u32 = 0,
+/// The stack alignment, which is 16 bytes by default. This is specified by the
+/// tool-conventions: https://github.com/WebAssembly/tool-conventions/blob/main/BasicCABI.md
+/// and also what the llvm backend will emit.
+/// However, local variables or the usage of `incoming_stack_alignment` in a `CallingConvention` can overwrite this default.
+stack_alignment: Alignment = .@"16",
+
+// For each individual Wasm valtype we store a separate free list which
+// allows us to re-use locals that are no longer used. e.g. a temporary local.
+/// A list of indexes which represents a local of valtype `i32`.
+/// It is illegal to store a non-i32 valtype in this list.
+free_locals_i32: std.ArrayListUnmanaged(u32) = .empty,
+/// A list of indexes which represents a local of valtype `i64`.
+/// It is illegal to store a non-i64 valtype in this list.
+free_locals_i64: std.ArrayListUnmanaged(u32) = .empty,
+/// A list of indexes which represents a local of valtype `f32`.
+/// It is illegal to store a non-f32 valtype in this list.
+free_locals_f32: std.ArrayListUnmanaged(u32) = .empty,
+/// A list of indexes which represents a local of valtype `f64`.
+/// It is illegal to store a non-f64 valtype in this list.
+free_locals_f64: std.ArrayListUnmanaged(u32) = .empty,
+/// A list of indexes which represents a local of valtype `v128`.
+/// It is illegal to store a non-v128 valtype in this list.
+free_locals_v128: std.ArrayListUnmanaged(u32) = .empty,
+
+/// When in debug mode, this tracks if no `finishAir` was missed.
+/// Forgetting to call `finishAir` will cause the result to not be
+/// stored in our `values` map and therefore cause bugs.
+air_bookkeeping: @TypeOf(bookkeeping_init) = bookkeeping_init,
/// Wasm Value, created when generating an instruction
const WValue = union(enum) {
@@ -601,104 +694,6 @@ test "Wasm - buildOpcode" {
/// Hashmap to store generated `WValue` for each `Air.Inst.Ref`
pub const ValueTable = std.AutoArrayHashMapUnmanaged(Air.Inst.Ref, WValue);
-const CodeGen = @This();
-
-/// Reference to the function declaration the code
-/// section belongs to
-owner_nav: InternPool.Nav.Index,
-src_loc: Zcu.LazySrcLoc,
-/// Current block depth. Used to calculate the relative difference between a break
-/// and block
-block_depth: u32 = 0,
-air: Air,
-liveness: Liveness,
-gpa: mem.Allocator,
-debug_output: link.File.DebugInfoOutput,
-func_index: InternPool.Index,
-/// Contains a list of current branches.
-/// When we return from a branch, the branch will be popped from this list,
-/// which means branches can only contain references from within its own branch,
-/// or a branch higher (lower index) in the tree.
-branches: std.ArrayListUnmanaged(Branch) = .empty,
-/// Table to save `WValue`'s generated by an `Air.Inst`
-// values: ValueTable,
-/// Mapping from Air.Inst.Index to block ids
-blocks: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, struct {
- label: u32,
- value: WValue,
-}) = .{},
-/// Maps `loop` instructions to their label. `br` to here repeats the loop.
-loops: std.AutoHashMapUnmanaged(Air.Inst.Index, u32) = .empty,
-/// `bytes` contains the wasm bytecode belonging to the 'code' section.
-code: *std.ArrayListUnmanaged(u8),
-/// The index the next local generated will have
-/// NOTE: arguments share the index with locals therefore the first variable
-/// will have the index that comes after the last argument's index
-local_index: u32 = 0,
-/// The index of the current argument.
-/// Used to track which argument is being referenced in `airArg`.
-arg_index: u32 = 0,
-/// List of all locals' types generated throughout this declaration
-/// used to emit locals count at start of 'code' section.
-locals: std.ArrayListUnmanaged(u8),
-/// List of simd128 immediates. Each value is stored as an array of bytes.
-/// This list will only be populated for 128bit-simd values when the target features
-/// are enabled also.
-simd_immediates: std.ArrayListUnmanaged([16]u8) = .empty,
-/// The Target we're emitting (used to call intInfo)
-target: *const std.Target,
-/// Represents the wasm binary file that is being linked.
-bin_file: *link.File.Wasm,
-pt: Zcu.PerThread,
-/// List of MIR Instructions
-mir_instructions: std.MultiArrayList(Mir.Inst) = .{},
-/// Contains extra data for MIR
-mir_extra: std.ArrayListUnmanaged(u32) = .empty,
-/// When a function is executing, we store the current stack pointer's value within this local.
-/// This value is then used to restore the stack pointer to the original value at the return of the function.
-initial_stack_value: WValue = .none,
-/// The current stack pointer subtracted with the stack size. From this value, we will calculate
-/// all offsets of the stack values.
-bottom_stack_value: WValue = .none,
-/// Arguments of this function declaration
-/// This will be set after `resolveCallingConventionValues`
-args: []WValue = &.{},
-/// This will only be `.none` if the function returns void, or returns an immediate.
-/// When it returns a pointer to the stack, the `.local` tag will be active and must be populated
-/// before this function returns its execution to the caller.
-return_value: WValue = .none,
-/// The size of the stack this function occupies. In the function prologue
-/// we will move the stack pointer by this number, forward aligned with the `stack_alignment`.
-stack_size: u32 = 0,
-/// The stack alignment, which is 16 bytes by default. This is specified by the
-/// tool-conventions: https://github.com/WebAssembly/tool-conventions/blob/main/BasicCABI.md
-/// and also what the llvm backend will emit.
-/// However, local variables or the usage of `incoming_stack_alignment` in a `CallingConvention` can overwrite this default.
-stack_alignment: Alignment = .@"16",
-
-// For each individual Wasm valtype we store a separate free list which
-// allows us to re-use locals that are no longer used. e.g. a temporary local.
-/// A list of indexes which represents a local of valtype `i32`.
-/// It is illegal to store a non-i32 valtype in this list.
-free_locals_i32: std.ArrayListUnmanaged(u32) = .empty,
-/// A list of indexes which represents a local of valtype `i64`.
-/// It is illegal to store a non-i64 valtype in this list.
-free_locals_i64: std.ArrayListUnmanaged(u32) = .empty,
-/// A list of indexes which represents a local of valtype `f32`.
-/// It is illegal to store a non-f32 valtype in this list.
-free_locals_f32: std.ArrayListUnmanaged(u32) = .empty,
-/// A list of indexes which represents a local of valtype `f64`.
-/// It is illegal to store a non-f64 valtype in this list.
-free_locals_f64: std.ArrayListUnmanaged(u32) = .empty,
-/// A list of indexes which represents a local of valtype `v128`.
-/// It is illegal to store a non-v128 valtype in this list.
-free_locals_v128: std.ArrayListUnmanaged(u32) = .empty,
-
-/// When in debug mode, this tracks if no `finishAir` was missed.
-/// Forgetting to call `finishAir` will cause the result to not be
-/// stored in our `values` map and therefore cause bugs.
-air_bookkeeping: @TypeOf(bookkeeping_init) = bookkeeping_init,
-
const bookkeeping_init = if (std.debug.runtime_safety) @as(usize, 0) else {};
const InnerError = error{
@@ -719,8 +714,6 @@ pub fn deinit(func: *CodeGen) void {
func.loops.deinit(func.gpa);
func.locals.deinit(func.gpa);
func.simd_immediates.deinit(func.gpa);
- func.mir_instructions.deinit(func.gpa);
- func.mir_extra.deinit(func.gpa);
func.free_locals_i32.deinit(func.gpa);
func.free_locals_i64.deinit(func.gpa);
func.free_locals_f32.deinit(func.gpa);
@@ -729,9 +722,10 @@ pub fn deinit(func: *CodeGen) void {
func.* = undefined;
}
-fn fail(func: *CodeGen, comptime fmt: []const u8, args: anytype) error{ OutOfMemory, CodegenFail } {
- const msg = try Zcu.ErrorMsg.create(func.gpa, func.src_loc, fmt, args);
- return func.pt.zcu.codegenFailMsg(func.owner_nav, msg);
+fn fail(cg: *CodeGen, comptime fmt: []const u8, args: anytype) error{ OutOfMemory, CodegenFail } {
+ const zcu = cg.pt.zcu;
+ const func = zcu.funcInfo(cg.func_index);
+ return zcu.codegenFail(func.owner_nav, fmt, args);
}
/// Resolves the `WValue` for the given instruction `inst`
@@ -767,7 +761,7 @@ fn resolveInst(func: *CodeGen, ref: Air.Inst.Ref) InnerError!WValue {
//
// In the other cases, we will simply lower the constant to a value that fits
// into a single local (such as a pointer, integer, bool, etc).
- const result: WValue = if (isByRef(ty, pt, func.target.*))
+ const result: WValue = if (isByRef(ty, pt, func.target))
.{ .memory = val.toIntern() }
else
try func.lowerConstant(val, ty);
@@ -885,8 +879,12 @@ fn addLabel(func: *CodeGen, tag: Mir.Inst.Tag, label: u32) error{OutOfMemory}!vo
try func.addInst(.{ .tag = tag, .data = .{ .label = label } });
}
-fn addCallTagName(func: *CodeGen, ip_index: InternPool.Index) error{OutOfMemory}!void {
- try func.addInst(.{ .tag = .call_tag_name, .data = .{ .ip_index = ip_index } });
+fn addIpIndex(func: *CodeGen, tag: Mir.Inst.Tag, i: InternPool.Index) Allocator.Error!void {
+ try func.addInst(.{ .tag = tag, .data = .{ .ip_index = i } });
+}
+
+fn addNav(func: *CodeGen, tag: Mir.Inst.Tag, i: InternPool.Nav.Index) Allocator.Error!void {
+ try func.addInst(.{ .tag = tag, .data = .{ .nav_index = i } });
}
/// Accepts an unsigned 32bit integer rather than a signed integer to
@@ -900,7 +898,7 @@ fn addImm32(func: *CodeGen, imm: u32) error{OutOfMemory}!void {
/// prevent us from having to bitcast multiple times as most values
/// within codegen are represented as unsigned rather than signed.
fn addImm64(func: *CodeGen, imm: u64) error{OutOfMemory}!void {
- const extra_index = try func.addExtra(Mir.Imm64.fromU64(imm));
+ const extra_index = try func.addExtra(Mir.Imm64.init(imm));
try func.addInst(.{ .tag = .i64_const, .data = .{ .payload = extra_index } });
}
@@ -916,7 +914,7 @@ fn addImm128(func: *CodeGen, index: u32) error{OutOfMemory}!void {
}
fn addFloat64(func: *CodeGen, float: f64) error{OutOfMemory}!void {
- const extra_index = try func.addExtra(Mir.Float64.fromFloat64(float));
+ const extra_index = try func.addExtra(Mir.Float64.init(float));
try func.addInst(.{ .tag = .f64_const, .data = .{ .payload = extra_index } });
}
@@ -956,6 +954,8 @@ fn addExtraAssumeCapacity(func: *CodeGen, extra: anytype) error{OutOfMemory}!u32
inline for (fields) |field| {
func.mir_extra.appendAssumeCapacity(switch (field.type) {
u32 => @field(extra, field.name),
+ i32 => @bitCast(@field(extra, field.name)),
+ InternPool.Index => @intFromEnum(@field(extra, field.name)),
else => |field_type| @compileError("Unsupported field type " ++ @typeName(field_type)),
});
}
@@ -963,11 +963,11 @@ fn addExtraAssumeCapacity(func: *CodeGen, extra: anytype) error{OutOfMemory}!u32
}
/// Using a given `Type`, returns the corresponding valtype for .auto callconv
-fn typeToValtype(ty: Type, pt: Zcu.PerThread, target: std.Target) std.wasm.Valtype {
+fn typeToValtype(ty: Type, pt: Zcu.PerThread, target: *const std.Target) std.wasm.Valtype {
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
return switch (ty.zigTypeTag(zcu)) {
- .float => switch (ty.floatBits(target)) {
+ .float => switch (ty.floatBits(target.*)) {
16 => .i32, // stored/loaded as u16
32 => .f32,
64 => .f64,
@@ -1003,14 +1003,14 @@ fn typeToValtype(ty: Type, pt: Zcu.PerThread, target: std.Target) std.wasm.Valty
}
/// Using a given `Type`, returns the byte representation of its wasm value type
-fn genValtype(ty: Type, pt: Zcu.PerThread, target: std.Target) u8 {
+fn genValtype(ty: Type, pt: Zcu.PerThread, target: *const std.Target) u8 {
return @intFromEnum(typeToValtype(ty, pt, target));
}
/// Using a given `Type`, returns the corresponding wasm value type
/// Differently from `genValtype` this also allows `void` to create a block
/// with no return type
-fn genBlockType(ty: Type, pt: Zcu.PerThread, target: std.Target) u8 {
+fn genBlockType(ty: Type, pt: Zcu.PerThread, target: *const std.Target) u8 {
return switch (ty.ip_index) {
.void_type, .noreturn_type => std.wasm.block_empty,
else => genValtype(ty, pt, target),
@@ -1028,15 +1028,17 @@ fn emitWValue(func: *CodeGen, value: WValue) InnerError!void {
.imm128 => |val| try func.addImm128(val),
.float32 => |val| try func.addInst(.{ .tag = .f32_const, .data = .{ .float32 = val } }),
.float64 => |val| try func.addFloat64(val),
- .memory => |ptr| {
- const extra_index = try func.addExtra(Mir.Memory{ .pointer = ptr, .offset = 0 });
- try func.addInst(.{ .tag = .memory_address, .data = .{ .payload = extra_index } });
- },
- .memory_offset => |mem_off| {
- const extra_index = try func.addExtra(Mir.Memory{ .pointer = mem_off.pointer, .offset = mem_off.offset });
- try func.addInst(.{ .tag = .memory_address, .data = .{ .payload = extra_index } });
- },
- .function_index => |index| try func.addLabel(.function_index, index), // write function index and generate relocation
+ .memory => |ptr| try func.addInst(.{ .tag = .uav_ref, .data = .{ .ip_index = ptr } }),
+ .memory_offset => |mo| try func.addInst(.{
+ .tag = .uav_ref_off,
+ .data = .{
+ .payload = try func.addExtra(Mir.UavRefOff{
+ .ip_index = mo.pointer,
+ .offset = @intCast(mo.offset), // TODO should not be an assert
+ }),
+ },
+ }),
+ .function_index => |index| try func.addIpIndex(.function_index, index),
.stack_offset => try func.addLabel(.local_get, func.bottom_stack_value.local.value), // caller must ensure to address the offset
}
}
@@ -1075,7 +1077,7 @@ fn getResolvedInst(func: *CodeGen, ref: Air.Inst.Ref) *WValue {
/// Returns a corresponding `Wvalue` with `local` as active tag
fn allocLocal(func: *CodeGen, ty: Type) InnerError!WValue {
const pt = func.pt;
- const valtype = typeToValtype(ty, pt, func.target.*);
+ const valtype = typeToValtype(ty, pt, func.target);
const index_or_null = switch (valtype) {
.i32 => func.free_locals_i32.popOrNull(),
.i64 => func.free_locals_i64.popOrNull(),
@@ -1095,7 +1097,7 @@ fn allocLocal(func: *CodeGen, ty: Type) InnerError!WValue {
/// to use a zero-initialized local.
fn ensureAllocLocal(func: *CodeGen, ty: Type) InnerError!WValue {
const pt = func.pt;
- try func.locals.append(func.gpa, genValtype(ty, pt, func.target.*));
+ try func.locals.append(func.gpa, genValtype(ty, pt, func.target));
const initial_index = func.local_index;
func.local_index += 1;
return .{ .local = .{ .value = initial_index, .references = 1 } };
@@ -1107,7 +1109,7 @@ fn genFunctype(
params: []const InternPool.Index,
return_type: Type,
pt: Zcu.PerThread,
- target: std.Target,
+ target: *const std.Target,
) !link.File.Wasm.FunctionType.Index {
const zcu = pt.zcu;
const gpa = zcu.gpa;
@@ -1162,150 +1164,206 @@ fn genFunctype(
});
}
-pub fn generate(
- bin_file: *link.File,
+pub const Function = extern struct {
+ /// Index into `Wasm.mir_instructions`.
+ mir_off: u32,
+ /// This is unused except for as a safety slice bound and could be removed.
+ mir_len: u32,
+ /// Index into `Wasm.mir_extra`.
+ mir_extra_off: u32,
+ /// This is unused except for as a safety slice bound and could be removed.
+ mir_extra_len: u32,
+ locals_off: u32,
+ locals_len: u32,
+ prologue: Prologue,
+
+ pub const Prologue = extern struct {
+ flags: Flags,
+ sp_local: u32,
+ stack_size: u32,
+ bottom_stack_local: u32,
+
+ pub const Flags = packed struct(u32) {
+ stack_alignment: Alignment,
+ padding: u26 = 0,
+ };
+
+ pub const none: Prologue = .{
+ .sp_local = 0,
+ .flags = .{ .stack_alignment = .none },
+ .stack_size = 0,
+ .bottom_stack_local = 0,
+ };
+
+ pub fn isNone(p: *const Prologue) bool {
+ return p.flags.stack_alignment != .none;
+ }
+ };
+
+ pub fn lower(f: *Function, wasm: *const Wasm, code: *std.ArrayList(u8)) Allocator.Error!void {
+ const gpa = wasm.base.comp.gpa;
+
+ // Write the locals in the prologue of the function body.
+ const locals = wasm.all_zcu_locals[f.locals_off..][0..f.locals_len];
+ try code.ensureUnusedCapacity(gpa, 5 + locals.len * 6 + 38);
+
+ std.leb.writeUleb128(code.writer(gpa), @as(u32, @intCast(locals.len))) catch unreachable;
+ for (locals) |local| {
+ std.leb.writeUleb128(code.writer(gpa), @as(u32, 1)) catch unreachable;
+ code.appendAssumeCapacity(local);
+ }
+
+ // Stack management section of function prologue.
+ const stack_alignment = f.prologue.flags.stack_alignment;
+ if (stack_alignment.toByteUnits()) |align_bytes| {
+ const sp_global = try wasm.stackPointerGlobalIndex();
+ // load stack pointer
+ code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.global_get));
+ std.leb.writeULEB128(code.writer(gpa), @intFromEnum(sp_global)) catch unreachable;
+ // store stack pointer so we can restore it when we return from the function
+ code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.local_tee));
+ leb.writeUleb128(code.writer(gpa), f.prologue.sp_local) catch unreachable;
+ // get the total stack size
+ const aligned_stack: i32 = @intCast(f.stack_alignment.forward(f.prologue.stack_size));
+ code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_const));
+ leb.writeIleb128(code.writer(gpa), aligned_stack) catch unreachable;
+ // subtract it from the current stack pointer
+ code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_sub));
+ // Get negative stack alignment
+ const neg_stack_align = @as(i32, @intCast(align_bytes)) * -1;
+ code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_const));
+ leb.writeIleb128(code.writer(gpa), neg_stack_align) catch unreachable;
+ // Bitwise-and the value to get the new stack pointer to ensure the
+ // pointers are aligned with the abi alignment.
+ code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_and));
+ // The bottom will be used to calculate all stack pointer offsets.
+ code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.local_tee));
+ leb.writeUleb128(code.writer(gpa), f.prologue.bottom_stack_local) catch unreachable;
+ // Store the current stack pointer value into the global stack pointer so other function calls will
+ // start from this value instead and not overwrite the current stack.
+ code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.global_set));
+ std.leb.writeULEB128(code.writer(gpa), @intFromEnum(sp_global)) catch unreachable;
+ }
+
+ var emit: Emit = .{
+ .mir = .{
+ .instruction_tags = wasm.mir_instructions.items(.tag)[f.mir_off..][0..f.mir_len],
+ .instruction_datas = wasm.mir_instructions.items(.data)[f.mir_off..][0..f.mir_len],
+ .extra = wasm.mir_extra[f.mir_extra_off..][0..f.mir_extra_len],
+ },
+ .wasm = wasm,
+ .code = code,
+ };
+ try emit.lowerToCode();
+ }
+};
+
+pub const Error = error{
+ OutOfMemory,
+ /// Compiler was asked to operate on a number larger than supported.
+ Overflow,
+ /// Indicates the error is already stored in Zcu `failed_codegen`.
+ CodegenFail,
+};
+
+pub fn function(
+ wasm: *Wasm,
pt: Zcu.PerThread,
- src_loc: Zcu.LazySrcLoc,
func_index: InternPool.Index,
air: Air,
liveness: Liveness,
- code: *std.ArrayListUnmanaged(u8),
- debug_output: link.File.DebugInfoOutput,
-) codegen.CodeGenError!void {
+) Error!Function {
const zcu = pt.zcu;
const gpa = zcu.gpa;
const func = zcu.funcInfo(func_index);
const file_scope = zcu.navFileScope(func.owner_nav);
const target = &file_scope.mod.resolved_target.result;
+ const fn_ty = zcu.navValue(func.owner_nav).typeOf(zcu);
+ const fn_info = zcu.typeToFunc(fn_ty).?;
+ const ip = &zcu.intern_pool;
+ const fn_ty_index = try genFunctype(wasm, fn_info.cc, fn_info.param_types.get(ip), Type.fromInterned(fn_info.return_type), pt, target);
+ const returns = fn_ty_index.ptr(wasm).returns.slice(wasm);
+ const any_returns = returns.len != 0;
+
+ var cc_result = try resolveCallingConventionValues(pt, fn_ty, target);
+ defer cc_result.deinit(gpa);
+
var code_gen: CodeGen = .{
.gpa = gpa,
.pt = pt,
.air = air,
.liveness = liveness,
- .code = code,
.owner_nav = func.owner_nav,
- .src_loc = src_loc,
- .locals = .{},
.target = target,
- .bin_file = bin_file.cast(.wasm).?,
- .debug_output = debug_output,
+ .wasm = wasm,
.func_index = func_index,
+ .args = cc_result.args,
+ .return_value = cc_result.return_value,
+ .local_index = cc_result.local_index,
+ .mir_instructions = &wasm.mir_instructions,
+ .mir_extra = &wasm.mir_extra,
+ .locals = &wasm.all_zcu_locals,
};
defer code_gen.deinit();
- genFunc(&code_gen) catch |err| switch (err) {
+ return functionInner(&code_gen, any_returns) catch |err| switch (err) {
error.CodegenFail => return error.CodegenFail,
else => |e| return code_gen.fail("failed to generate function: {s}", .{@errorName(e)}),
};
}
-fn genFunc(func: *CodeGen) InnerError!void {
- const wasm = func.bin_file;
- const pt = func.pt;
+fn functionInner(cg: *CodeGen, any_returns: bool) InnerError!Function {
+ const wasm = cg.wasm;
+ const pt = cg.pt;
const zcu = pt.zcu;
- const ip = &zcu.intern_pool;
- const fn_ty = zcu.navValue(func.owner_nav).typeOf(zcu);
- const fn_info = zcu.typeToFunc(fn_ty).?;
- const fn_ty_index = try genFunctype(wasm, fn_info.cc, fn_info.param_types.get(ip), Type.fromInterned(fn_info.return_type), pt, func.target.*);
- var cc_result = try func.resolveCallingConventionValues(fn_ty);
- defer cc_result.deinit(func.gpa);
+ const start_mir_off: u32 = @intCast(wasm.mir_instructions.len);
+ const start_mir_extra_off: u32 = @intCast(wasm.mir_extra.items.len);
+ const start_locals_off: u32 = @intCast(wasm.all_zcu_locals.items.len);
- func.args = cc_result.args;
- func.return_value = cc_result.return_value;
-
- try func.addTag(.dbg_prologue_end);
-
- try func.branches.append(func.gpa, .{});
+ try cg.branches.append(cg.gpa, .{});
// clean up outer branch
defer {
- var outer_branch = func.branches.pop();
- outer_branch.deinit(func.gpa);
- assert(func.branches.items.len == 0); // missing branch merge
+ var outer_branch = cg.branches.pop();
+ outer_branch.deinit(cg.gpa);
+ assert(cg.branches.items.len == 0); // missing branch merge
}
// Generate MIR for function body
- try func.genBody(func.air.getMainBody());
+ try cg.genBody(cg.air.getMainBody());
// In case we have a return value, but the last instruction is a noreturn (such as a while loop)
// we emit an unreachable instruction to tell the stack validator that part will never be reached.
- const returns = fn_ty_index.ptr(wasm).returns.slice(wasm);
- if (returns.len != 0 and func.air.instructions.len > 0) {
- const inst: Air.Inst.Index = @enumFromInt(func.air.instructions.len - 1);
- const last_inst_ty = func.typeOfIndex(inst);
+ if (any_returns and cg.air.instructions.len > 0) {
+ const inst: Air.Inst.Index = @enumFromInt(cg.air.instructions.len - 1);
+ const last_inst_ty = cg.typeOfIndex(inst);
if (!last_inst_ty.hasRuntimeBitsIgnoreComptime(zcu) or last_inst_ty.isNoReturn(zcu)) {
- try func.addTag(.@"unreachable");
+ try cg.addTag(.@"unreachable");
}
}
// End of function body
- try func.addTag(.end);
-
- try func.addTag(.dbg_epilogue_begin);
-
- // check if we have to initialize and allocate anything into the stack frame.
- // If so, create enough stack space and insert the instructions at the front of the list.
- if (func.initial_stack_value != .none) {
- var prologue = std.ArrayList(Mir.Inst).init(func.gpa);
- defer prologue.deinit();
-
- const sp = @intFromEnum(wasm.zig_object.?.stack_pointer_sym);
- // load stack pointer
- try prologue.append(.{ .tag = .global_get, .data = .{ .label = sp } });
- // store stack pointer so we can restore it when we return from the function
- try prologue.append(.{ .tag = .local_tee, .data = .{ .label = func.initial_stack_value.local.value } });
- // get the total stack size
- const aligned_stack = func.stack_alignment.forward(func.stack_size);
- try prologue.append(.{ .tag = .i32_const, .data = .{ .imm32 = @intCast(aligned_stack) } });
- // subtract it from the current stack pointer
- try prologue.append(.{ .tag = .i32_sub, .data = .{ .tag = {} } });
- // Get negative stack alignment
- try prologue.append(.{ .tag = .i32_const, .data = .{ .imm32 = @as(i32, @intCast(func.stack_alignment.toByteUnits().?)) * -1 } });
- // Bitwise-and the value to get the new stack pointer to ensure the pointers are aligned with the abi alignment
- try prologue.append(.{ .tag = .i32_and, .data = .{ .tag = {} } });
- // store the current stack pointer as the bottom, which will be used to calculate all stack pointer offsets
- try prologue.append(.{ .tag = .local_tee, .data = .{ .label = func.bottom_stack_value.local.value } });
- // Store the current stack pointer value into the global stack pointer so other function calls will
- // start from this value instead and not overwrite the current stack.
- try prologue.append(.{ .tag = .global_set, .data = .{ .label = sp } });
-
- // reserve space and insert all prologue instructions at the front of the instruction list
- // We insert them in reserve order as there is no insertSlice in multiArrayList.
- try func.mir_instructions.ensureUnusedCapacity(func.gpa, prologue.items.len);
- for (prologue.items, 0..) |_, index| {
- const inst = prologue.items[prologue.items.len - 1 - index];
- func.mir_instructions.insertAssumeCapacity(0, inst);
- }
- }
-
- var mir: Mir = .{
- .instructions = func.mir_instructions.toOwnedSlice(),
- .extra = try func.mir_extra.toOwnedSlice(func.gpa),
- };
- defer mir.deinit(func.gpa);
-
- var emit: Emit = .{
- .mir = mir,
- .bin_file = wasm,
- .code = func.code,
- .locals = func.locals.items,
- .owner_nav = func.owner_nav,
- .dbg_output = func.debug_output,
- .prev_di_line = 0,
- .prev_di_column = 0,
- .prev_di_offset = 0,
- };
-
- emit.emitMir() catch |err| switch (err) {
- error.EmitFail => {
- func.err_msg = emit.error_msg.?;
- return error.CodegenFail;
+ try cg.addTag(.end);
+ try cg.addTag(.dbg_epilogue_begin);
+
+ return .{
+ .mir_off = start_mir_off,
+ .mir_len = @intCast(wasm.mir_instructions.len - start_mir_off),
+ .mir_extra_off = start_mir_extra_off,
+ .mir_extra_len = @intCast(wasm.mir_extra.items.len - start_mir_extra_off),
+ .locals_off = start_locals_off,
+ .locals_len = @intCast(wasm.all_zcu_locals.items.len - start_locals_off),
+ .prologue = if (cg.initial_stack_value == .none) .none else .{
+ .sp_local = cg.initial_stack_value.local.value,
+ .flags = .{ .stack_alignment = cg.stack_alignment },
+ .stack_size = cg.stack_size,
+ .bottom_stack_local = cg.bottom_stack_value.local.value,
},
- else => |e| return e,
};
}
const CallWValues = struct {
args: []WValue,
return_value: WValue,
+ local_index: u32,
fn deinit(values: *CallWValues, gpa: Allocator) void {
gpa.free(values.args);
@@ -1313,28 +1371,34 @@ const CallWValues = struct {
}
};
-fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWValues {
- const pt = func.pt;
+fn resolveCallingConventionValues(
+ pt: Zcu.PerThread,
+ fn_ty: Type,
+ target: *const std.Target,
+) Allocator.Error!CallWValues {
const zcu = pt.zcu;
+ const gpa = zcu.gpa;
const ip = &zcu.intern_pool;
const fn_info = zcu.typeToFunc(fn_ty).?;
const cc = fn_info.cc;
+
var result: CallWValues = .{
.args = &.{},
.return_value = .none,
+ .local_index = 0,
};
if (cc == .naked) return result;
- var args = std.ArrayList(WValue).init(func.gpa);
+ var args = std.ArrayList(WValue).init(gpa);
defer args.deinit();
// Check if we store the result as a pointer to the stack rather than
// by value
- if (firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), pt, func.target.*)) {
+ if (firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), pt, target)) {
// the sret arg will be passed as first argument, therefore we
// set the `return_value` before allocating locals for regular args.
- result.return_value = .{ .local = .{ .value = func.local_index, .references = 1 } };
- func.local_index += 1;
+ result.return_value = .{ .local = .{ .value = result.local_index, .references = 1 } };
+ result.local_index += 1;
}
switch (cc) {
@@ -1344,8 +1408,8 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV
continue;
}
- try args.append(.{ .local = .{ .value = func.local_index, .references = 1 } });
- func.local_index += 1;
+ try args.append(.{ .local = .{ .value = result.local_index, .references = 1 } });
+ result.local_index += 1;
}
},
.wasm_watc => {
@@ -1353,18 +1417,23 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV
const ty_classes = abi.classifyType(Type.fromInterned(ty), zcu);
for (ty_classes) |class| {
if (class == .none) continue;
- try args.append(.{ .local = .{ .value = func.local_index, .references = 1 } });
- func.local_index += 1;
+ try args.append(.{ .local = .{ .value = result.local_index, .references = 1 } });
+ result.local_index += 1;
}
}
},
- else => return func.fail("calling convention '{s}' not supported for Wasm", .{@tagName(cc)}),
+ else => unreachable, // Frontend is responsible for emitting an error earlier.
}
result.args = try args.toOwnedSlice();
return result;
}
-fn firstParamSRet(cc: std.builtin.CallingConvention, return_type: Type, pt: Zcu.PerThread, target: std.Target) bool {
+fn firstParamSRet(
+ cc: std.builtin.CallingConvention,
+ return_type: Type,
+ pt: Zcu.PerThread,
+ target: *const std.Target,
+) bool {
switch (cc) {
.@"inline" => unreachable,
.auto => return isByRef(return_type, pt, target),
@@ -1466,8 +1535,7 @@ fn restoreStackPointer(func: *CodeGen) !void {
// Get the original stack pointer's value
try func.emitWValue(func.initial_stack_value);
- // save its value in the global stack pointer
- try func.addLabel(.global_set, @intFromEnum(func.bin_file.zig_object.?.stack_pointer_sym));
+ try func.addTag(.global_set_sp);
}
/// From a given type, will create space on the virtual stack to store the value of such type.
@@ -1675,7 +1743,7 @@ fn arch(func: *const CodeGen) std.Target.Cpu.Arch {
/// For a given `Type`, will return true when the type will be passed
/// by reference, rather than by value
-fn isByRef(ty: Type, pt: Zcu.PerThread, target: std.Target) bool {
+fn isByRef(ty: Type, pt: Zcu.PerThread, target: *const std.Target) bool {
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
switch (ty.zigTypeTag(zcu)) {
@@ -1716,7 +1784,7 @@ fn isByRef(ty: Type, pt: Zcu.PerThread, target: std.Target) bool {
.vector => return determineSimdStoreStrategy(ty, zcu, target) == .unrolled,
.int => return ty.intInfo(zcu).bits > 64,
.@"enum" => return ty.intInfo(zcu).bits > 64,
- .float => return ty.floatBits(target) > 64,
+ .float => return ty.floatBits(target.*) > 64,
.error_union => {
const pl_ty = ty.errorUnionPayload(zcu);
if (!pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
@@ -1747,7 +1815,7 @@ const SimdStoreStrategy = enum {
/// This means when a given type is 128 bits and either the simd128 or relaxed-simd
/// features are enabled, the function will return `.direct`. This would allow to store
/// it using a instruction, rather than an unrolled version.
-fn determineSimdStoreStrategy(ty: Type, zcu: *Zcu, target: std.Target) SimdStoreStrategy {
+fn determineSimdStoreStrategy(ty: Type, zcu: *Zcu, target: *const std.Target) SimdStoreStrategy {
assert(ty.zigTypeTag(zcu) == .vector);
if (ty.bitSize(zcu) != 128) return .unrolled;
const hasFeature = std.Target.wasm.featureSetHas;
@@ -2076,7 +2144,7 @@ fn airRet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
.op = .load,
.width = @as(u8, @intCast(scalar_type.abiSize(zcu) * 8)),
.signedness = if (scalar_type.isSignedInt(zcu)) .signed else .unsigned,
- .valtype1 = typeToValtype(scalar_type, pt, func.target.*),
+ .valtype1 = typeToValtype(scalar_type, pt, func.target),
});
try func.addMemArg(Mir.Inst.Tag.fromOpcode(opcode), .{
.offset = operand.offset(),
@@ -2109,7 +2177,7 @@ fn airRetPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
const fn_info = zcu.typeToFunc(zcu.navValue(func.owner_nav).typeOf(zcu)).?;
- if (firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), pt, func.target.*)) {
+ if (firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), pt, func.target)) {
break :result func.return_value;
}
@@ -2131,7 +2199,7 @@ fn airRetLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
if (ret_ty.isError(zcu)) {
try func.addImm32(0);
}
- } else if (!firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), pt, func.target.*)) {
+ } else if (!firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), pt, func.target)) {
// leave on the stack
_ = try func.load(operand, ret_ty, 0);
}
@@ -2142,7 +2210,7 @@ fn airRetLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModifier) InnerError!void {
- const wasm = func.bin_file;
+ const wasm = func.wasm;
if (modifier == .always_tail) return func.fail("TODO implement tail calls for wasm", .{});
const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = func.air.extraData(Air.Call, pl_op.payload);
@@ -2159,7 +2227,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
};
const ret_ty = fn_ty.fnReturnType(zcu);
const fn_info = zcu.typeToFunc(fn_ty).?;
- const first_param_sret = firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), pt, func.target.*);
+ const first_param_sret = firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), pt, func.target);
const callee: ?InternPool.Nav.Index = blk: {
const func_val = (try func.air.value(pl_op.operand, pt)) orelse break :blk null;
@@ -2199,7 +2267,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
const operand = try func.resolveInst(pl_op.operand);
try func.emitWValue(operand);
- const fn_type_index = try genFunctype(wasm, fn_info.cc, fn_info.param_types.get(ip), Type.fromInterned(fn_info.return_type), pt, func.target.*);
+ const fn_type_index = try genFunctype(wasm, fn_info.cc, fn_info.param_types.get(ip), Type.fromInterned(fn_info.return_type), pt, func.target);
try func.addLabel(.call_indirect, @intFromEnum(fn_type_index));
}
@@ -2260,7 +2328,7 @@ fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void
// load the value, and then shift+or the rhs into the result location.
const int_elem_ty = try pt.intType(.unsigned, ptr_info.packed_offset.host_size * 8);
- if (isByRef(int_elem_ty, pt, func.target.*)) {
+ if (isByRef(int_elem_ty, pt, func.target)) {
return func.fail("TODO: airStore for pointers to bitfields with backing type larger than 64bits", .{});
}
@@ -2326,11 +2394,11 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
const len = @as(u32, @intCast(abi_size));
return func.memcpy(lhs, rhs, .{ .imm32 = len });
},
- .@"struct", .array, .@"union" => if (isByRef(ty, pt, func.target.*)) {
+ .@"struct", .array, .@"union" => if (isByRef(ty, pt, func.target)) {
const len = @as(u32, @intCast(abi_size));
return func.memcpy(lhs, rhs, .{ .imm32 = len });
},
- .vector => switch (determineSimdStoreStrategy(ty, zcu, func.target.*)) {
+ .vector => switch (determineSimdStoreStrategy(ty, zcu, func.target)) {
.unrolled => {
const len: u32 = @intCast(abi_size);
return func.memcpy(lhs, rhs, .{ .imm32 = len });
@@ -2388,7 +2456,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
// into lhs, so we calculate that and emit that instead
try func.lowerToStack(rhs);
- const valtype = typeToValtype(ty, pt, func.target.*);
+ const valtype = typeToValtype(ty, pt, func.target);
const opcode = buildOpcode(.{
.valtype1 = valtype,
.width = @as(u8, @intCast(abi_size * 8)),
@@ -2417,7 +2485,7 @@ fn airLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
if (!ty.hasRuntimeBitsIgnoreComptime(zcu)) return func.finishAir(inst, .none, &.{ty_op.operand});
const result = result: {
- if (isByRef(ty, pt, func.target.*)) {
+ if (isByRef(ty, pt, func.target)) {
const new_local = try func.allocStack(ty);
try func.store(new_local, operand, ty, 0);
break :result new_local;
@@ -2467,7 +2535,7 @@ fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValu
const abi_size: u8 = @intCast(ty.abiSize(zcu));
const opcode = buildOpcode(.{
- .valtype1 = typeToValtype(ty, pt, func.target.*),
+ .valtype1 = typeToValtype(ty, pt, func.target),
.width = abi_size * 8,
.op = .load,
.signedness = if (ty.isSignedInt(zcu)) .signed else .unsigned,
@@ -2517,19 +2585,6 @@ fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
func.arg_index += 1;
}
- switch (func.debug_output) {
- .dwarf => |dwarf| {
- const name = func.air.instructions.items(.data)[@intFromEnum(inst)].arg.name;
- if (name != .none) try dwarf.genLocalDebugInfo(
- .local_arg,
- name.toSlice(func.air),
- arg_ty,
- .{ .wasm_ext = .{ .local = arg.local.value } },
- );
- },
- else => {},
- }
-
return func.finishAir(inst, arg, &.{});
}
@@ -2577,7 +2632,7 @@ fn binOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!
return func.floatOp(float_op, ty, &.{ lhs, rhs });
}
- if (isByRef(ty, pt, func.target.*)) {
+ if (isByRef(ty, pt, func.target)) {
if (ty.zigTypeTag(zcu) == .int) {
return func.binOpBigInt(lhs, rhs, ty, op);
} else {
@@ -2590,7 +2645,7 @@ fn binOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!
const opcode: std.wasm.Opcode = buildOpcode(.{
.op = op,
- .valtype1 = typeToValtype(ty, pt, func.target.*),
+ .valtype1 = typeToValtype(ty, pt, func.target),
.signedness = if (ty.isSignedInt(zcu)) .signed else .unsigned,
});
try func.emitWValue(lhs);
@@ -2854,7 +2909,7 @@ fn floatOp(func: *CodeGen, float_op: FloatOp, ty: Type, args: []const WValue) In
for (args) |operand| {
try func.emitWValue(operand);
}
- const opcode = buildOpcode(.{ .op = op, .valtype1 = typeToValtype(ty, pt, func.target.*) });
+ const opcode = buildOpcode(.{ .op = op, .valtype1 = typeToValtype(ty, pt, func.target) });
try func.addTag(Mir.Inst.Tag.fromOpcode(opcode));
return .stack;
}
@@ -3141,8 +3196,8 @@ fn lowerNavRef(func: *CodeGen, nav_index: InternPool.Nav.Index, offset: u32) Inn
return .{ .imm32 = 0xaaaaaaaa };
}
- const atom_index = try func.bin_file.getOrCreateAtomForNav(pt, nav_index);
- const atom = func.bin_file.getAtom(atom_index);
+ const atom_index = try func.wasm.getOrCreateAtomForNav(pt, nav_index);
+ const atom = func.wasm.getAtom(atom_index);
const target_sym_index = @intFromEnum(atom.sym_index);
if (ip.isFunctionType(nav_ty)) {
@@ -3156,7 +3211,7 @@ fn lowerNavRef(func: *CodeGen, nav_index: InternPool.Nav.Index, offset: u32) Inn
fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue {
const pt = func.pt;
const zcu = pt.zcu;
- assert(!isByRef(ty, pt, func.target.*));
+ assert(!isByRef(ty, pt, func.target));
const ip = &zcu.intern_pool;
if (val.isUndefDeep(zcu)) return func.emitUndefined(ty);
@@ -3267,7 +3322,7 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue {
.aggregate => switch (ip.indexToKey(ty.ip_index)) {
.array_type => return func.fail("Wasm TODO: LowerConstant for {}", .{ty.fmt(pt)}),
.vector_type => {
- assert(determineSimdStoreStrategy(ty, zcu, func.target.*) == .direct);
+ assert(determineSimdStoreStrategy(ty, zcu, func.target) == .direct);
var buf: [16]u8 = undefined;
val.writeToMemory(pt, &buf) catch unreachable;
return func.storeSimdImmd(buf);
@@ -3398,11 +3453,11 @@ fn airBlock(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn lowerBlock(func: *CodeGen, inst: Air.Inst.Index, block_ty: Type, body: []const Air.Inst.Index) InnerError!void {
const pt = func.pt;
- const wasm_block_ty = genBlockType(block_ty, pt, func.target.*);
+ const wasm_block_ty = genBlockType(block_ty, pt, func.target);
// if wasm_block_ty is non-empty, we create a register to store the temporary value
const block_result: WValue = if (wasm_block_ty != std.wasm.block_empty) blk: {
- const ty: Type = if (isByRef(block_ty, pt, func.target.*)) Type.u32 else block_ty;
+ const ty: Type = if (isByRef(block_ty, pt, func.target)) Type.u32 else block_ty;
break :blk try func.ensureAllocLocal(ty); // make sure it's a clean local as it may never get overwritten
} else .none;
@@ -3527,7 +3582,7 @@ fn cmp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareO
}
} else if (ty.isAnyFloat()) {
return func.cmpFloat(ty, lhs, rhs, op);
- } else if (isByRef(ty, pt, func.target.*)) {
+ } else if (isByRef(ty, pt, func.target)) {
return func.cmpBigInt(lhs, rhs, ty, op);
}
@@ -3545,7 +3600,7 @@ fn cmp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareO
try func.lowerToStack(rhs);
const opcode: std.wasm.Opcode = buildOpcode(.{
- .valtype1 = typeToValtype(ty, pt, func.target.*),
+ .valtype1 = typeToValtype(ty, pt, func.target),
.op = switch (op) {
.lt => .lt,
.lte => .le,
@@ -3612,7 +3667,7 @@ fn airCmpVector(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airCmpLtErrorsLen(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const operand = try func.resolveInst(un_op);
- const sym_index = try func.bin_file.getGlobalSymbol("__zig_errors_len", null);
+ const sym_index = try func.wasm.getGlobalSymbol("__zig_errors_len", null);
const errors_len: WValue = .{ .memory = @intFromEnum(sym_index) };
try func.emitWValue(operand);
@@ -3758,7 +3813,7 @@ fn airBitcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
break :result try func.bitcast(wanted_ty, given_ty, operand);
}
- if (isByRef(given_ty, pt, func.target.*) and !isByRef(wanted_ty, pt, func.target.*)) {
+ if (isByRef(given_ty, pt, func.target) and !isByRef(wanted_ty, pt, func.target)) {
const loaded_memory = try func.load(operand, wanted_ty, 0);
if (needs_wrapping) {
break :result try func.wrapOperand(loaded_memory, wanted_ty);
@@ -3766,7 +3821,7 @@ fn airBitcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
break :result loaded_memory;
}
}
- if (!isByRef(given_ty, pt, func.target.*) and isByRef(wanted_ty, pt, func.target.*)) {
+ if (!isByRef(given_ty, pt, func.target) and isByRef(wanted_ty, pt, func.target)) {
const stack_memory = try func.allocStack(wanted_ty);
try func.store(stack_memory, operand, given_ty, 0);
if (needs_wrapping) {
@@ -3796,8 +3851,8 @@ fn bitcast(func: *CodeGen, wanted_ty: Type, given_ty: Type, operand: WValue) Inn
const opcode = buildOpcode(.{
.op = .reinterpret,
- .valtype1 = typeToValtype(wanted_ty, pt, func.target.*),
- .valtype2 = typeToValtype(given_ty, pt, func.target.*),
+ .valtype1 = typeToValtype(wanted_ty, pt, func.target),
+ .valtype2 = typeToValtype(given_ty, pt, func.target),
});
try func.emitWValue(operand);
try func.addTag(Mir.Inst.Tag.fromOpcode(opcode));
@@ -3919,8 +3974,8 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
break :result try func.trunc(shifted_value, field_ty, backing_ty);
},
.@"union" => result: {
- if (isByRef(struct_ty, pt, func.target.*)) {
- if (!isByRef(field_ty, pt, func.target.*)) {
+ if (isByRef(struct_ty, pt, func.target)) {
+ if (!isByRef(field_ty, pt, func.target)) {
break :result try func.load(operand, field_ty, 0);
} else {
const new_stack_val = try func.allocStack(field_ty);
@@ -3946,7 +4001,7 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const offset = std.math.cast(u32, struct_ty.structFieldOffset(field_index, zcu)) orelse {
return func.fail("Field type '{}' too big to fit into stack frame", .{field_ty.fmt(pt)});
};
- if (isByRef(field_ty, pt, func.target.*)) {
+ if (isByRef(field_ty, pt, func.target)) {
switch (operand) {
.stack_offset => |stack_offset| {
break :result .{ .stack_offset = .{ .value = stack_offset.value + offset, .references = 1 } };
@@ -4209,7 +4264,7 @@ fn airUnwrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: boo
}
const pl_offset = @as(u32, @intCast(errUnionPayloadOffset(payload_ty, zcu)));
- if (op_is_ptr or isByRef(payload_ty, pt, func.target.*)) {
+ if (op_is_ptr or isByRef(payload_ty, pt, func.target)) {
break :result try func.buildPointerOffset(operand, pl_offset, .new);
}
@@ -4436,7 +4491,7 @@ fn airOptionalPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const operand = try func.resolveInst(ty_op.operand);
if (opt_ty.optionalReprIsPayload(zcu)) break :result func.reuseOperand(ty_op.operand, operand);
- if (isByRef(payload_ty, pt, func.target.*)) {
+ if (isByRef(payload_ty, pt, func.target)) {
break :result try func.buildPointerOffset(operand, 0, .new);
}
@@ -4570,7 +4625,7 @@ fn airSliceElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.addTag(.i32_mul);
try func.addTag(.i32_add);
- const elem_result = if (isByRef(elem_ty, pt, func.target.*))
+ const elem_result = if (isByRef(elem_ty, pt, func.target))
.stack
else
try func.load(.stack, elem_ty, 0);
@@ -4729,7 +4784,7 @@ fn airPtrElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.addTag(.i32_mul);
try func.addTag(.i32_add);
- const elem_result = if (isByRef(elem_ty, pt, func.target.*))
+ const elem_result = if (isByRef(elem_ty, pt, func.target))
.stack
else
try func.load(.stack, elem_ty, 0);
@@ -4780,7 +4835,7 @@ fn airPtrBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
else => ptr_ty.childType(zcu),
};
- const valtype = typeToValtype(Type.usize, pt, func.target.*);
+ const valtype = typeToValtype(Type.usize, pt, func.target);
const mul_opcode = buildOpcode(.{ .valtype1 = valtype, .op = .mul });
const bin_opcode = buildOpcode(.{ .valtype1 = valtype, .op = op });
@@ -4927,7 +4982,7 @@ fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const elem_ty = array_ty.childType(zcu);
const elem_size = elem_ty.abiSize(zcu);
- if (isByRef(array_ty, pt, func.target.*)) {
+ if (isByRef(array_ty, pt, func.target)) {
try func.lowerToStack(array);
try func.emitWValue(index);
try func.addImm32(@intCast(elem_size));
@@ -4970,7 +5025,7 @@ fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
}
- const elem_result = if (isByRef(elem_ty, pt, func.target.*))
+ const elem_result = if (isByRef(elem_ty, pt, func.target))
.stack
else
try func.load(.stack, elem_ty, 0);
@@ -5014,8 +5069,8 @@ fn airIntFromFloat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.emitWValue(operand);
const op = buildOpcode(.{
.op = .trunc,
- .valtype1 = typeToValtype(dest_ty, pt, func.target.*),
- .valtype2 = typeToValtype(op_ty, pt, func.target.*),
+ .valtype1 = typeToValtype(dest_ty, pt, func.target),
+ .valtype2 = typeToValtype(op_ty, pt, func.target),
.signedness = dest_info.signedness,
});
try func.addTag(Mir.Inst.Tag.fromOpcode(op));
@@ -5059,8 +5114,8 @@ fn airFloatFromInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.emitWValue(operand);
const op = buildOpcode(.{
.op = .convert,
- .valtype1 = typeToValtype(dest_ty, pt, func.target.*),
- .valtype2 = typeToValtype(op_ty, pt, func.target.*),
+ .valtype1 = typeToValtype(dest_ty, pt, func.target),
+ .valtype2 = typeToValtype(op_ty, pt, func.target),
.signedness = op_info.signedness,
});
try func.addTag(Mir.Inst.Tag.fromOpcode(op));
@@ -5076,7 +5131,7 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty = func.typeOfIndex(inst);
const elem_ty = ty.childType(zcu);
- if (determineSimdStoreStrategy(ty, zcu, func.target.*) == .direct) blk: {
+ if (determineSimdStoreStrategy(ty, zcu, func.target) == .direct) blk: {
switch (operand) {
// when the operand lives in the linear memory section, we can directly
// load and splat the value at once. Meaning we do not first have to load
@@ -5160,7 +5215,7 @@ fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const elem_size = child_ty.abiSize(zcu);
// TODO: One of them could be by ref; handle in loop
- if (isByRef(func.typeOf(extra.a), pt, func.target.*) or isByRef(inst_ty, pt, func.target.*)) {
+ if (isByRef(func.typeOf(extra.a), pt, func.target) or isByRef(inst_ty, pt, func.target)) {
const result = try func.allocStack(inst_ty);
for (0..mask_len) |index| {
@@ -5236,7 +5291,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// When the element type is by reference, we must copy the entire
// value. It is therefore safer to move the offset pointer and store
// each value individually, instead of using store offsets.
- if (isByRef(elem_ty, pt, func.target.*)) {
+ if (isByRef(elem_ty, pt, func.target)) {
// copy stack pointer into a temporary local, which is
// moved for each element to store each value in the right position.
const offset = try func.buildPointerOffset(result, 0, .new);
@@ -5266,7 +5321,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
},
.@"struct" => switch (result_ty.containerLayout(zcu)) {
.@"packed" => {
- if (isByRef(result_ty, pt, func.target.*)) {
+ if (isByRef(result_ty, pt, func.target)) {
return func.fail("TODO: airAggregateInit for packed structs larger than 64 bits", .{});
}
const packed_struct = zcu.typeToPackedStruct(result_ty).?;
@@ -5369,15 +5424,15 @@ fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
if (layout.tag_size == 0) {
break :result .none;
}
- assert(!isByRef(union_ty, pt, func.target.*));
+ assert(!isByRef(union_ty, pt, func.target));
break :result tag_int;
}
- if (isByRef(union_ty, pt, func.target.*)) {
+ if (isByRef(union_ty, pt, func.target)) {
const result_ptr = try func.allocStack(union_ty);
const payload = try func.resolveInst(extra.init);
if (layout.tag_align.compare(.gte, layout.payload_align)) {
- if (isByRef(field_ty, pt, func.target.*)) {
+ if (isByRef(field_ty, pt, func.target)) {
const payload_ptr = try func.buildPointerOffset(result_ptr, layout.tag_size, .new);
try func.store(payload_ptr, payload, field_ty, 0);
} else {
@@ -5458,7 +5513,7 @@ fn cmpOptionals(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op:
_ = try func.load(lhs, payload_ty, 0);
_ = try func.load(rhs, payload_ty, 0);
- const opcode = buildOpcode(.{ .op = .ne, .valtype1 = typeToValtype(payload_ty, pt, func.target.*) });
+ const opcode = buildOpcode(.{ .op = .ne, .valtype1 = typeToValtype(payload_ty, pt, func.target) });
try func.addTag(Mir.Inst.Tag.fromOpcode(opcode));
try func.addLabel(.br_if, 0);
@@ -5910,7 +5965,7 @@ fn airErrorName(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// As the names are global and the slice elements are constant, we do not have
// to make a copy of the ptr+value but can point towards them directly.
const pt = func.pt;
- const error_table_symbol = try func.bin_file.getErrorTableSymbol(pt);
+ const error_table_symbol = try func.wasm.getErrorTableSymbol(pt);
const name_ty = Type.slice_const_u8_sentinel_0;
const abi_size = name_ty.abiSize(pt.zcu);
@@ -5943,7 +5998,7 @@ fn airPtrSliceFieldPtr(func: *CodeGen, inst: Air.Inst.Index, offset: u32) InnerE
/// NOTE: Allocates place for result on virtual stack, when integer size > 64 bits
fn intZeroValue(func: *CodeGen, ty: Type) InnerError!WValue {
- const zcu = func.bin_file.base.comp.zcu.?;
+ const zcu = func.wasm.base.comp.zcu.?;
const int_info = ty.intInfo(zcu);
const wasm_bits = toWasmBits(int_info.bits) orelse {
return func.fail("TODO: Implement intZeroValue for integer bitsize: {d}", .{int_info.bits});
@@ -6379,8 +6434,6 @@ fn airCtz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airDbgStmt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- if (func.debug_output != .dwarf) return func.finishAir(inst, .none, &.{});
-
const dbg_stmt = func.air.instructions.items(.data)[@intFromEnum(inst)].dbg_stmt;
try func.addInst(.{ .tag = .dbg_line, .data = .{
.payload = try func.addExtra(Mir.DbgLineColumn{
@@ -6405,26 +6458,7 @@ fn airDbgVar(
is_ptr: bool,
) InnerError!void {
_ = is_ptr;
- if (func.debug_output != .dwarf) return func.finishAir(inst, .none, &.{});
-
- const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
- const ty = func.typeOf(pl_op.operand);
- const operand = try func.resolveInst(pl_op.operand);
-
- log.debug("airDbgVar: %{d}: {}, {}", .{ inst, ty.fmtDebug(), operand });
-
- const name: Air.NullTerminatedString = @enumFromInt(pl_op.payload);
- log.debug(" var name = ({s})", .{name.toSlice(func.air)});
-
- const loc: link.File.Dwarf.Loc = switch (operand) {
- .local => |local| .{ .wasm_ext = .{ .local = local.value } },
- else => blk: {
- log.debug("TODO generate debug info for {}", .{operand});
- break :blk .empty;
- },
- };
- try func.debug_output.dwarf.genLocalDebugInfo(local_tag, name.toSlice(func.air), ty, loc);
-
+ _ = local_tag;
return func.finishAir(inst, .none, &.{});
}
@@ -6500,7 +6534,7 @@ fn lowerTry(
}
const pl_offset: u32 = @intCast(errUnionPayloadOffset(pl_ty, zcu));
- if (isByRef(pl_ty, pt, func.target.*)) {
+ if (isByRef(pl_ty, pt, func.target)) {
return buildPointerOffset(func, err_union, pl_offset, .new);
}
const payload = try func.load(err_union, pl_ty, pl_offset);
@@ -7074,15 +7108,15 @@ fn callIntrinsic(
args: []const WValue,
) InnerError!WValue {
assert(param_types.len == args.len);
- const wasm = func.bin_file;
+ const wasm = func.wasm;
const pt = func.pt;
const zcu = pt.zcu;
- const func_type_index = try genFunctype(wasm, .{ .wasm_watc = .{} }, param_types, return_type, pt, func.target.*);
+ const func_type_index = try genFunctype(wasm, .{ .wasm_watc = .{} }, param_types, return_type, pt, func.target);
const func_index = wasm.getOutputFunction(try wasm.internString(name), func_type_index);
// Always pass over C-ABI
- const want_sret_param = firstParamSRet(.{ .wasm_watc = .{} }, return_type, pt, func.target.*);
+ const want_sret_param = firstParamSRet(.{ .wasm_watc = .{} }, return_type, pt, func.target);
// if we want return as first param, we allocate a pointer to stack,
// and emit it as our first argument
const sret = if (want_sret_param) blk: {
@@ -7121,7 +7155,7 @@ fn airTagName(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const result_ptr = try func.allocStack(func.typeOfIndex(inst));
try func.lowerToStack(result_ptr);
try func.emitWValue(operand);
- try func.addCallTagName(enum_ty.toIntern());
+ try func.addIpIndex(.call_tag_name, enum_ty.toIntern());
return func.finishAir(inst, result_ptr, &.{un_op});
}
@@ -7265,7 +7299,7 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
break :val ptr_val;
};
- const result = if (isByRef(result_ty, pt, func.target.*)) val: {
+ const result = if (isByRef(result_ty, pt, func.target)) val: {
try func.emitWValue(cmp_result);
try func.addImm32(~@as(u32, 0));
try func.addTag(.i32_xor);