author    mlugg <mlugg@mlugg.co.uk>  2024-07-28 17:09:14 +0100
committer mlugg <mlugg@mlugg.co.uk>  2024-08-11 07:29:41 +0100
commit    548a087fafeda5b07d2237d5137906b8d07da699 (patch)
tree      69135f129b84ab5b65f443d0a52899b232696e2b /src/arch/wasm/CodeGen.zig
parent    531cd177e89c1edfcd2e52f74f220eb186a25f78 (diff)
compiler: split Decl into Nav and Cau
The type `Zcu.Decl` in the compiler is problematic: over time it has gained many responsibilities. Every source declaration, container type, generic instantiation, and `@extern` has a `Decl`. The functions of these `Decl`s are in some cases entirely disjoint.

After careful analysis, I determined that the two main responsibilities of `Decl` are as follows:

* A `Decl` acts as the "subject" of semantic analysis at comptime. A single unit of analysis is either a runtime function body or a `Decl`. It registers incremental dependencies, tracks analysis errors, etc.
* A `Decl` acts as a "global variable": a pointer to it is consistent, and it may be lowered to a specific symbol by the codegen backend.

This commit eliminates `Decl` and introduces new types to model these responsibilities: `Cau` (Comptime Analysis Unit) and `Nav` (Named Addressable Value).

Every source declaration, and every container type requiring resolution (so *not* including `opaque`), has a `Cau`. For a source declaration, this `Cau` performs the resolution of its value. (When #131 is implemented, it is an open question whether type and value resolution will share a `Cau` or have two distinct `Cau`s.) For a type, this `Cau` is the context in which type resolution occurs.

Every non-`comptime` source declaration, every generic instantiation, and every distinct `extern` has a `Nav`. These are what is sent to codegen/link: the backends by definition do not care about `Cau`s.

This commit has some minor technically-breaking changes surrounding `usingnamespace`. I don't expect them to impact anyone, since they fix semantics which were previously inconsistent (the behavior changed depending on hashmap iteration order!).

Aside from that, this changeset has no significant user-facing changes. Instead, it is an internal refactor which makes it easier to correctly model the responsibilities of different objects, particularly regarding incremental compilation. The performance impact should be negligible, but I will take measurements before merging this work into `master`.

Co-authored-by: Jacob Young <jacobly0@users.noreply.github.com>
Co-authored-by: Jakub Konka <kubkon@jakubkonka.com>
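As a rough illustration of the split described above, here is a minimal sketch in Zig. It is not the compiler's actual representation (the real `Cau` and `Nav` live compactly inside `InternPool`); every type and field name below is hypothetical.

    // Placeholder handles; the real compiler uses InternPool indices.
    const DeclIndex = u32;
    const TypeIndex = u32;
    const ValueIndex = u32;

    /// A `Cau` (Comptime Analysis Unit) is the subject of one unit of
    /// comptime analysis: the thing that registers incremental dependencies
    /// and tracks analysis errors.
    const Cau = struct {
        owner: union(enum) {
            /// resolving the value of a source declaration
            decl: DeclIndex,
            /// resolving a container type (any container except `opaque`)
            container: TypeIndex,
        },
    };

    /// A `Nav` (Named Addressable Value) is a "global": its address is
    /// consistent, and codegen/link lower it to a symbol. The backends see
    /// only `Nav`s, never `Cau`s.
    const Nav = struct {
        name: []const u8,
        val: ValueIndex,
    };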
Diffstat (limited to 'src/arch/wasm/CodeGen.zig')
-rw-r--r--  src/arch/wasm/CodeGen.zig  391
1 file changed, 194 insertions(+), 197 deletions(-)
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index 68b7f72938..be049ec975 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -640,8 +640,8 @@ const CodeGen = @This();
/// Reference to the function declaration the code
/// section belongs to
-decl: *Decl,
-decl_index: InternPool.DeclIndex,
+owner_nav: InternPool.Nav.Index,
+src_loc: Zcu.LazySrcLoc,
/// Current block depth. Used to calculate the relative difference between a break
/// and block
block_depth: u32 = 0,
@@ -681,7 +681,7 @@ locals: std.ArrayListUnmanaged(u8),
/// are enabled also.
simd_immediates: std.ArrayListUnmanaged([16]u8) = .{},
/// The Target we're emitting (used to call intInfo)
-target: std.Target,
+target: *const std.Target,
/// Represents the wasm binary file that is being linked.
bin_file: *link.File.Wasm,
pt: Zcu.PerThread,
@@ -765,8 +765,7 @@ pub fn deinit(func: *CodeGen) void {
/// Sets `err_msg` on `CodeGen` and returns `error.CodegenFail` which is caught in link/Wasm.zig
fn fail(func: *CodeGen, comptime fmt: []const u8, args: anytype) InnerError {
- const src_loc = func.decl.navSrcLoc(func.pt.zcu);
- func.err_msg = try Zcu.ErrorMsg.create(func.gpa, src_loc, fmt, args);
+ func.err_msg = try Zcu.ErrorMsg.create(func.gpa, func.src_loc, fmt, args);
return error.CodegenFail;
}
@@ -803,8 +802,14 @@ fn resolveInst(func: *CodeGen, ref: Air.Inst.Ref) InnerError!WValue {
//
// In the other cases, we will simply lower the constant to a value that fits
// into a single local (such as a pointer, integer, bool, etc).
- const result: WValue = if (isByRef(ty, pt))
- .{ .memory = try func.bin_file.lowerUnnamedConst(pt, val, func.decl_index) }
+ const result: WValue = if (isByRef(ty, pt, func.target.*))
+ switch (try func.bin_file.lowerUav(pt, val.toIntern(), .none, func.src_loc)) {
+ .mcv => |mcv| .{ .memory = mcv.load_symbol },
+ .fail => |err_msg| {
+ func.err_msg = err_msg;
+ return error.CodegenFail;
+ },
+ }
else
try func.lowerConstant(val, ty);
@@ -995,9 +1000,8 @@ fn addExtraAssumeCapacity(func: *CodeGen, extra: anytype) error{OutOfMemory}!u32
}
/// Using a given `Type`, returns the corresponding valtype for .auto callconv
-fn typeToValtype(ty: Type, pt: Zcu.PerThread) wasm.Valtype {
+fn typeToValtype(ty: Type, pt: Zcu.PerThread, target: std.Target) wasm.Valtype {
const mod = pt.zcu;
- const target = mod.getTarget();
const ip = &mod.intern_pool;
return switch (ty.zigTypeTag(mod)) {
.Float => switch (ty.floatBits(target)) {
@@ -1015,19 +1019,19 @@ fn typeToValtype(ty: Type, pt: Zcu.PerThread) wasm.Valtype {
.Struct => blk: {
if (pt.zcu.typeToPackedStruct(ty)) |packed_struct| {
const backing_int_ty = Type.fromInterned(packed_struct.backingIntTypeUnordered(ip));
- break :blk typeToValtype(backing_int_ty, pt);
+ break :blk typeToValtype(backing_int_ty, pt, target);
} else {
break :blk .i32;
}
},
- .Vector => switch (determineSimdStoreStrategy(ty, pt)) {
+ .Vector => switch (determineSimdStoreStrategy(ty, pt, target)) {
.direct => .v128,
.unrolled => .i32,
},
.Union => switch (ty.containerLayout(pt.zcu)) {
.@"packed" => blk: {
const int_ty = pt.intType(.unsigned, @as(u16, @intCast(ty.bitSize(pt)))) catch @panic("out of memory");
- break :blk typeToValtype(int_ty, pt);
+ break :blk typeToValtype(int_ty, pt, target);
},
else => .i32,
},
@@ -1036,17 +1040,17 @@ fn typeToValtype(ty: Type, pt: Zcu.PerThread) wasm.Valtype {
}
/// Using a given `Type`, returns the byte representation of its wasm value type
-fn genValtype(ty: Type, pt: Zcu.PerThread) u8 {
- return wasm.valtype(typeToValtype(ty, pt));
+fn genValtype(ty: Type, pt: Zcu.PerThread, target: std.Target) u8 {
+ return wasm.valtype(typeToValtype(ty, pt, target));
}
/// Using a given `Type`, returns the corresponding wasm value type
/// Differently from `genValtype` this also allows `void` to create a block
/// with no return type
-fn genBlockType(ty: Type, pt: Zcu.PerThread) u8 {
+fn genBlockType(ty: Type, pt: Zcu.PerThread, target: std.Target) u8 {
return switch (ty.ip_index) {
.void_type, .noreturn_type => wasm.block_empty,
- else => genValtype(ty, pt),
+ else => genValtype(ty, pt, target),
};
}
@@ -1108,7 +1112,7 @@ fn getResolvedInst(func: *CodeGen, ref: Air.Inst.Ref) *WValue {
/// Returns a corresponding `Wvalue` with `local` as active tag
fn allocLocal(func: *CodeGen, ty: Type) InnerError!WValue {
const pt = func.pt;
- const valtype = typeToValtype(ty, pt);
+ const valtype = typeToValtype(ty, pt, func.target.*);
const index_or_null = switch (valtype) {
.i32 => func.free_locals_i32.popOrNull(),
.i64 => func.free_locals_i64.popOrNull(),
@@ -1128,7 +1132,7 @@ fn allocLocal(func: *CodeGen, ty: Type) InnerError!WValue {
/// to use a zero-initialized local.
fn ensureAllocLocal(func: *CodeGen, ty: Type) InnerError!WValue {
const pt = func.pt;
- try func.locals.append(func.gpa, genValtype(ty, pt));
+ try func.locals.append(func.gpa, genValtype(ty, pt, func.target.*));
const initial_index = func.local_index;
func.local_index += 1;
return .{ .local = .{ .value = initial_index, .references = 1 } };
@@ -1142,6 +1146,7 @@ fn genFunctype(
params: []const InternPool.Index,
return_type: Type,
pt: Zcu.PerThread,
+ target: std.Target,
) !wasm.Type {
const mod = pt.zcu;
var temp_params = std.ArrayList(wasm.Valtype).init(gpa);
@@ -1149,16 +1154,16 @@ fn genFunctype(
var returns = std.ArrayList(wasm.Valtype).init(gpa);
defer returns.deinit();
- if (firstParamSRet(cc, return_type, pt)) {
+ if (firstParamSRet(cc, return_type, pt, target)) {
try temp_params.append(.i32); // memory address is always a 32-bit handle
} else if (return_type.hasRuntimeBitsIgnoreComptime(pt)) {
if (cc == .C) {
const res_classes = abi.classifyType(return_type, pt);
assert(res_classes[0] == .direct and res_classes[1] == .none);
const scalar_type = abi.scalarType(return_type, pt);
- try returns.append(typeToValtype(scalar_type, pt));
+ try returns.append(typeToValtype(scalar_type, pt, target));
} else {
- try returns.append(typeToValtype(return_type, pt));
+ try returns.append(typeToValtype(return_type, pt, target));
}
} else if (return_type.isError(mod)) {
try returns.append(.i32);
@@ -1175,9 +1180,9 @@ fn genFunctype(
if (param_classes[1] == .none) {
if (param_classes[0] == .direct) {
const scalar_type = abi.scalarType(param_type, pt);
- try temp_params.append(typeToValtype(scalar_type, pt));
+ try temp_params.append(typeToValtype(scalar_type, pt, target));
} else {
- try temp_params.append(typeToValtype(param_type, pt));
+ try temp_params.append(typeToValtype(param_type, pt, target));
}
} else {
// i128/f128
@@ -1185,7 +1190,7 @@ fn genFunctype(
try temp_params.append(.i64);
}
},
- else => try temp_params.append(typeToValtype(param_type, pt)),
+ else => try temp_params.append(typeToValtype(param_type, pt, target)),
}
}
@@ -1205,25 +1210,23 @@ pub fn generate(
code: *std.ArrayList(u8),
debug_output: codegen.DebugInfoOutput,
) codegen.CodeGenError!codegen.Result {
- _ = src_loc;
const zcu = pt.zcu;
const gpa = zcu.gpa;
const func = zcu.funcInfo(func_index);
- const decl = zcu.declPtr(func.owner_decl);
- const namespace = zcu.namespacePtr(decl.src_namespace);
- const target = namespace.fileScope(zcu).mod.resolved_target.result;
+ const file_scope = zcu.navFileScope(func.owner_nav);
+ const target = &file_scope.mod.resolved_target.result;
var code_gen: CodeGen = .{
.gpa = gpa,
.pt = pt,
.air = air,
.liveness = liveness,
.code = code,
- .decl_index = func.owner_decl,
- .decl = decl,
+ .owner_nav = func.owner_nav,
+ .src_loc = src_loc,
.err_msg = undefined,
.locals = .{},
.target = target,
- .bin_file = bin_file.cast(link.File.Wasm).?,
+ .bin_file = bin_file.cast(.wasm).?,
.debug_output = debug_output,
.func_index = func_index,
};
@@ -1241,12 +1244,13 @@ fn genFunc(func: *CodeGen) InnerError!void {
const pt = func.pt;
const mod = pt.zcu;
const ip = &mod.intern_pool;
- const fn_info = mod.typeToFunc(func.decl.typeOf(mod)).?;
- var func_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types.get(ip), Type.fromInterned(fn_info.return_type), pt);
+ const fn_ty = mod.navValue(func.owner_nav).typeOf(mod);
+ const fn_info = mod.typeToFunc(fn_ty).?;
+ var func_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types.get(ip), Type.fromInterned(fn_info.return_type), pt, func.target.*);
defer func_type.deinit(func.gpa);
- _ = try func.bin_file.storeDeclType(func.decl_index, func_type);
+ _ = try func.bin_file.storeNavType(func.owner_nav, func_type);
- var cc_result = try func.resolveCallingConventionValues(func.decl.typeOf(mod));
+ var cc_result = try func.resolveCallingConventionValues(fn_ty);
defer cc_result.deinit(func.gpa);
func.args = cc_result.args;
@@ -1324,7 +1328,7 @@ fn genFunc(func: *CodeGen) InnerError!void {
.bin_file = func.bin_file,
.code = func.code,
.locals = func.locals.items,
- .decl_index = func.decl_index,
+ .owner_nav = func.owner_nav,
.dbg_output = func.debug_output,
.prev_di_line = 0,
.prev_di_column = 0,
@@ -1367,7 +1371,7 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV
// Check if we store the result as a pointer to the stack rather than
// by value
- if (firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), pt)) {
+ if (firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), pt, func.target.*)) {
// the sret arg will be passed as first argument, therefore we
// set the `return_value` before allocating locals for regular args.
result.return_value = .{ .local = .{ .value = func.local_index, .references = 1 } };
@@ -1401,9 +1405,9 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV
return result;
}
-fn firstParamSRet(cc: std.builtin.CallingConvention, return_type: Type, pt: Zcu.PerThread) bool {
+fn firstParamSRet(cc: std.builtin.CallingConvention, return_type: Type, pt: Zcu.PerThread, target: std.Target) bool {
switch (cc) {
- .Unspecified, .Inline => return isByRef(return_type, pt),
+ .Unspecified, .Inline => return isByRef(return_type, pt, target),
.C => {
const ty_classes = abi.classifyType(return_type, pt);
if (ty_classes[0] == .indirect) return true;
@@ -1711,10 +1715,9 @@ fn arch(func: *const CodeGen) std.Target.Cpu.Arch {
/// For a given `Type`, will return true when the type will be passed
/// by reference, rather than by value
-fn isByRef(ty: Type, pt: Zcu.PerThread) bool {
+fn isByRef(ty: Type, pt: Zcu.PerThread, target: std.Target) bool {
const mod = pt.zcu;
const ip = &mod.intern_pool;
- const target = mod.getTarget();
switch (ty.zigTypeTag(mod)) {
.Type,
.ComptimeInt,
@@ -1746,11 +1749,11 @@ fn isByRef(ty: Type, pt: Zcu.PerThread) bool {
},
.Struct => {
if (mod.typeToPackedStruct(ty)) |packed_struct| {
- return isByRef(Type.fromInterned(packed_struct.backingIntTypeUnordered(ip)), pt);
+ return isByRef(Type.fromInterned(packed_struct.backingIntTypeUnordered(ip)), pt, target);
}
return ty.hasRuntimeBitsIgnoreComptime(pt);
},
- .Vector => return determineSimdStoreStrategy(ty, pt) == .unrolled,
+ .Vector => return determineSimdStoreStrategy(ty, pt, target) == .unrolled,
.Int => return ty.intInfo(mod).bits > 64,
.Enum => return ty.intInfo(mod).bits > 64,
.Float => return ty.floatBits(target) > 64,
@@ -1784,11 +1787,10 @@ const SimdStoreStrategy = enum {
/// This means when a given type is 128 bits and either the simd128 or relaxed-simd
/// features are enabled, the function will return `.direct`. This would allow to store
/// it using a instruction, rather than an unrolled version.
-fn determineSimdStoreStrategy(ty: Type, pt: Zcu.PerThread) SimdStoreStrategy {
+fn determineSimdStoreStrategy(ty: Type, pt: Zcu.PerThread, target: std.Target) SimdStoreStrategy {
std.debug.assert(ty.zigTypeTag(pt.zcu) == .Vector);
if (ty.bitSize(pt) != 128) return .unrolled;
const hasFeature = std.Target.wasm.featureSetHas;
- const target = pt.zcu.getTarget();
const features = target.cpu.features;
if (hasFeature(features, .relaxed_simd) or hasFeature(features, .simd128)) {
return .direct;
@@ -2091,7 +2093,7 @@ fn airRet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const mod = pt.zcu;
const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const operand = try func.resolveInst(un_op);
- const fn_info = mod.typeToFunc(func.decl.typeOf(mod)).?;
+ const fn_info = mod.typeToFunc(mod.navValue(func.owner_nav).typeOf(mod)).?;
const ret_ty = Type.fromInterned(fn_info.return_type);
// result must be stored in the stack and we return a pointer
@@ -2108,7 +2110,7 @@ fn airRet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
.op = .load,
.width = @as(u8, @intCast(scalar_type.abiSize(pt) * 8)),
.signedness = if (scalar_type.isSignedInt(mod)) .signed else .unsigned,
- .valtype1 = typeToValtype(scalar_type, pt),
+ .valtype1 = typeToValtype(scalar_type, pt, func.target.*),
});
try func.addMemArg(Mir.Inst.Tag.fromOpcode(opcode), .{
.offset = operand.offset(),
@@ -2140,8 +2142,8 @@ fn airRetPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
break :result try func.allocStack(Type.usize); // create pointer to void
}
- const fn_info = mod.typeToFunc(func.decl.typeOf(mod)).?;
- if (firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), pt)) {
+ const fn_info = mod.typeToFunc(mod.navValue(func.owner_nav).typeOf(mod)).?;
+ if (firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), pt, func.target.*)) {
break :result func.return_value;
}
@@ -2158,12 +2160,12 @@ fn airRetLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const operand = try func.resolveInst(un_op);
const ret_ty = func.typeOf(un_op).childType(mod);
- const fn_info = mod.typeToFunc(func.decl.typeOf(mod)).?;
+ const fn_info = mod.typeToFunc(mod.navValue(func.owner_nav).typeOf(mod)).?;
if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt)) {
if (ret_ty.isError(mod)) {
try func.addImm32(0);
}
- } else if (!firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), pt)) {
+ } else if (!firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), pt, func.target.*)) {
// leave on the stack
_ = try func.load(operand, ret_ty, 0);
}
@@ -2190,34 +2192,43 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
};
const ret_ty = fn_ty.fnReturnType(mod);
const fn_info = mod.typeToFunc(fn_ty).?;
- const first_param_sret = firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), pt);
+ const first_param_sret = firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), pt, func.target.*);
- const callee: ?InternPool.DeclIndex = blk: {
+ const callee: ?InternPool.Nav.Index = blk: {
const func_val = (try func.air.value(pl_op.operand, pt)) orelse break :blk null;
- if (func_val.getFunction(mod)) |function| {
- _ = try func.bin_file.getOrCreateAtomForDecl(pt, function.owner_decl);
- break :blk function.owner_decl;
- } else if (func_val.getExternFunc(mod)) |extern_func| {
- const ext_decl = mod.declPtr(extern_func.decl);
- const ext_info = mod.typeToFunc(ext_decl.typeOf(mod)).?;
- var func_type = try genFunctype(func.gpa, ext_info.cc, ext_info.param_types.get(ip), Type.fromInterned(ext_info.return_type), pt);
- defer func_type.deinit(func.gpa);
- const atom_index = try func.bin_file.getOrCreateAtomForDecl(pt, extern_func.decl);
- const atom = func.bin_file.getAtomPtr(atom_index);
- const type_index = try func.bin_file.storeDeclType(extern_func.decl, func_type);
- try func.bin_file.addOrUpdateImport(
- ext_decl.name.toSlice(&mod.intern_pool),
- atom.sym_index,
- ext_decl.getOwnedExternFunc(mod).?.lib_name.toSlice(&mod.intern_pool),
- type_index,
- );
- break :blk extern_func.decl;
- } else switch (mod.intern_pool.indexToKey(func_val.ip_index)) {
+ switch (ip.indexToKey(func_val.toIntern())) {
+ .func => |function| {
+ _ = try func.bin_file.getOrCreateAtomForNav(pt, function.owner_nav);
+ break :blk function.owner_nav;
+ },
+ .@"extern" => |@"extern"| {
+ const ext_nav = ip.getNav(@"extern".owner_nav);
+ const ext_info = mod.typeToFunc(Type.fromInterned(@"extern".ty)).?;
+ var func_type = try genFunctype(
+ func.gpa,
+ ext_info.cc,
+ ext_info.param_types.get(ip),
+ Type.fromInterned(ext_info.return_type),
+ pt,
+ func.target.*,
+ );
+ defer func_type.deinit(func.gpa);
+ const atom_index = try func.bin_file.getOrCreateAtomForNav(pt, @"extern".owner_nav);
+ const atom = func.bin_file.getAtomPtr(atom_index);
+ const type_index = try func.bin_file.storeNavType(@"extern".owner_nav, func_type);
+ try func.bin_file.addOrUpdateImport(
+ ext_nav.name.toSlice(ip),
+ atom.sym_index,
+ @"extern".lib_name.toSlice(ip),
+ type_index,
+ );
+ break :blk @"extern".owner_nav;
+ },
.ptr => |ptr| if (ptr.byte_offset == 0) switch (ptr.base_addr) {
- .decl => |decl| {
- _ = try func.bin_file.getOrCreateAtomForDecl(pt, decl);
- break :blk decl;
+ .nav => |nav| {
+ _ = try func.bin_file.getOrCreateAtomForNav(pt, nav);
+ break :blk nav;
},
else => {},
},
@@ -2242,7 +2253,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
}
if (callee) |direct| {
- const atom_index = func.bin_file.zigObjectPtr().?.decls_map.get(direct).?.atom;
+ const atom_index = func.bin_file.zigObjectPtr().?.navs.get(direct).?.atom;
try func.addLabel(.call, @intFromEnum(func.bin_file.getAtom(atom_index).sym_index));
} else {
// in this case we call a function pointer
@@ -2251,7 +2262,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
const operand = try func.resolveInst(pl_op.operand);
try func.emitWValue(operand);
- var fn_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types.get(ip), Type.fromInterned(fn_info.return_type), pt);
+ var fn_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types.get(ip), Type.fromInterned(fn_info.return_type), pt, func.target.*);
defer fn_type.deinit(func.gpa);
const fn_type_index = try func.bin_file.zigObjectPtr().?.putOrGetFuncType(func.gpa, fn_type);
@@ -2315,7 +2326,7 @@ fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void
// load the value, and then shift+or the rhs into the result location.
const int_elem_ty = try pt.intType(.unsigned, ptr_info.packed_offset.host_size * 8);
- if (isByRef(int_elem_ty, pt)) {
+ if (isByRef(int_elem_ty, pt, func.target.*)) {
return func.fail("TODO: airStore for pointers to bitfields with backing type larger than 64bits", .{});
}
@@ -2381,11 +2392,11 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
const len = @as(u32, @intCast(abi_size));
return func.memcpy(lhs, rhs, .{ .imm32 = len });
},
- .Struct, .Array, .Union => if (isByRef(ty, pt)) {
+ .Struct, .Array, .Union => if (isByRef(ty, pt, func.target.*)) {
const len = @as(u32, @intCast(abi_size));
return func.memcpy(lhs, rhs, .{ .imm32 = len });
},
- .Vector => switch (determineSimdStoreStrategy(ty, pt)) {
+ .Vector => switch (determineSimdStoreStrategy(ty, pt, func.target.*)) {
.unrolled => {
const len: u32 = @intCast(abi_size);
return func.memcpy(lhs, rhs, .{ .imm32 = len });
@@ -2443,7 +2454,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
// into lhs, so we calculate that and emit that instead
try func.lowerToStack(rhs);
- const valtype = typeToValtype(ty, pt);
+ const valtype = typeToValtype(ty, pt, func.target.*);
const opcode = buildOpcode(.{
.valtype1 = valtype,
.width = @as(u8, @intCast(abi_size * 8)),
@@ -2472,7 +2483,7 @@ fn airLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
if (!ty.hasRuntimeBitsIgnoreComptime(pt)) return func.finishAir(inst, .none, &.{ty_op.operand});
const result = result: {
- if (isByRef(ty, pt)) {
+ if (isByRef(ty, pt, func.target.*)) {
const new_local = try func.allocStack(ty);
try func.store(new_local, operand, ty, 0);
break :result new_local;
@@ -2522,7 +2533,7 @@ fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValu
const abi_size: u8 = @intCast(ty.abiSize(pt));
const opcode = buildOpcode(.{
- .valtype1 = typeToValtype(ty, pt),
+ .valtype1 = typeToValtype(ty, pt, func.target.*),
.width = abi_size * 8,
.op = .load,
.signedness = if (ty.isSignedInt(mod)) .signed else .unsigned,
@@ -2544,7 +2555,7 @@ fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const mod = pt.zcu;
const arg_index = func.arg_index;
const arg = func.args[arg_index];
- const cc = mod.typeToFunc(func.decl.typeOf(mod)).?.cc;
+ const cc = mod.typeToFunc(mod.navValue(func.owner_nav).typeOf(mod)).?.cc;
const arg_ty = func.typeOfIndex(inst);
if (cc == .C) {
const arg_classes = abi.classifyType(arg_ty, pt);
@@ -2577,7 +2588,7 @@ fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const name_nts = func.air.instructions.items(.data)[@intFromEnum(inst)].arg.name;
if (name_nts != .none) {
const name = func.air.nullTerminatedString(@intFromEnum(name_nts));
- try dwarf.genArgDbgInfo(name, arg_ty, mod.funcOwnerDeclIndex(func.func_index), .{
+ try dwarf.genArgDbgInfo(name, arg_ty, func.owner_nav, .{
.wasm_local = arg.local.value,
});
}
@@ -2631,7 +2642,7 @@ fn binOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!
return func.floatOp(float_op, ty, &.{ lhs, rhs });
}
- if (isByRef(ty, pt)) {
+ if (isByRef(ty, pt, func.target.*)) {
if (ty.zigTypeTag(mod) == .Int) {
return func.binOpBigInt(lhs, rhs, ty, op);
} else {
@@ -2644,7 +2655,7 @@ fn binOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!
const opcode: wasm.Opcode = buildOpcode(.{
.op = op,
- .valtype1 = typeToValtype(ty, pt),
+ .valtype1 = typeToValtype(ty, pt, func.target.*),
.signedness = if (ty.isSignedInt(mod)) .signed else .unsigned,
});
try func.emitWValue(lhs);
@@ -2896,7 +2907,7 @@ fn floatOp(func: *CodeGen, float_op: FloatOp, ty: Type, args: []const WValue) In
return func.fail("TODO: Implement floatOps for vectors", .{});
}
- const float_bits = ty.floatBits(func.target);
+ const float_bits = ty.floatBits(func.target.*);
if (float_op == .neg) {
return func.floatNeg(ty, args[0]);
@@ -2907,7 +2918,7 @@ fn floatOp(func: *CodeGen, float_op: FloatOp, ty: Type, args: []const WValue) In
for (args) |operand| {
try func.emitWValue(operand);
}
- const opcode = buildOpcode(.{ .op = op, .valtype1 = typeToValtype(ty, pt) });
+ const opcode = buildOpcode(.{ .op = op, .valtype1 = typeToValtype(ty, pt, func.target.*) });
try func.addTag(Mir.Inst.Tag.fromOpcode(opcode));
return .stack;
}
@@ -2955,7 +2966,7 @@ fn floatOp(func: *CodeGen, float_op: FloatOp, ty: Type, args: []const WValue) In
/// NOTE: The result value remains on top of the stack.
fn floatNeg(func: *CodeGen, ty: Type, arg: WValue) InnerError!WValue {
- const float_bits = ty.floatBits(func.target);
+ const float_bits = ty.floatBits(func.target.*);
switch (float_bits) {
16 => {
try func.emitWValue(arg);
@@ -3115,8 +3126,8 @@ fn lowerPtr(func: *CodeGen, ptr_val: InternPool.Index, prev_offset: u64) InnerEr
const ptr = zcu.intern_pool.indexToKey(ptr_val).ptr;
const offset: u64 = prev_offset + ptr.byte_offset;
return switch (ptr.base_addr) {
- .decl => |decl| return func.lowerDeclRefValue(decl, @intCast(offset)),
- .anon_decl => |ad| return func.lowerAnonDeclRef(ad, @intCast(offset)),
+ .nav => |nav| return func.lowerNavRef(nav, @intCast(offset)),
+ .uav => |uav| return func.lowerUavRef(uav, @intCast(offset)),
.int => return func.lowerConstant(try pt.intValue(Type.usize, offset), Type.usize),
.eu_payload => return func.fail("Wasm TODO: lower error union payload pointer", .{}),
.opt_payload => |opt_ptr| return func.lowerPtr(opt_ptr, offset),
@@ -3128,7 +3139,7 @@ fn lowerPtr(func: *CodeGen, ptr_val: InternPool.Index, prev_offset: u64) InnerEr
assert(base_ty.isSlice(zcu));
break :off switch (field.index) {
Value.slice_ptr_index => 0,
- Value.slice_len_index => @divExact(zcu.getTarget().ptrBitWidth(), 8),
+ Value.slice_len_index => @divExact(func.target.ptrBitWidth(), 8),
else => unreachable,
};
},
@@ -3160,32 +3171,29 @@ fn lowerPtr(func: *CodeGen, ptr_val: InternPool.Index, prev_offset: u64) InnerEr
};
}
-fn lowerAnonDeclRef(
+fn lowerUavRef(
func: *CodeGen,
- anon_decl: InternPool.Key.Ptr.BaseAddr.AnonDecl,
+ uav: InternPool.Key.Ptr.BaseAddr.Uav,
offset: u32,
) InnerError!WValue {
const pt = func.pt;
const mod = pt.zcu;
- const decl_val = anon_decl.val;
- const ty = Type.fromInterned(mod.intern_pool.typeOf(decl_val));
+ const ty = Type.fromInterned(mod.intern_pool.typeOf(uav.val));
const is_fn_body = ty.zigTypeTag(mod) == .Fn;
if (!is_fn_body and !ty.hasRuntimeBitsIgnoreComptime(pt)) {
return .{ .imm32 = 0xaaaaaaaa };
}
- const decl_align = mod.intern_pool.indexToKey(anon_decl.orig_ty).ptr_type.flags.alignment;
- const res = try func.bin_file.lowerAnonDecl(pt, decl_val, decl_align, func.decl.navSrcLoc(mod));
- switch (res) {
- .ok => {},
- .fail => |em| {
- func.err_msg = em;
+ const decl_align = mod.intern_pool.indexToKey(uav.orig_ty).ptr_type.flags.alignment;
+ const res = try func.bin_file.lowerUav(pt, uav.val, decl_align, func.src_loc);
+ const target_sym_index = switch (res) {
+ .mcv => |mcv| mcv.load_symbol,
+ .fail => |err_msg| {
+ func.err_msg = err_msg;
return error.CodegenFail;
},
- }
- const target_atom_index = func.bin_file.zigObjectPtr().?.anon_decls.get(decl_val).?;
- const target_sym_index = @intFromEnum(func.bin_file.getAtom(target_atom_index).sym_index);
+ };
if (is_fn_body) {
return .{ .function_index = target_sym_index };
} else if (offset == 0) {
@@ -3193,32 +3201,29 @@ fn lowerAnonDeclRef(
} else return .{ .memory_offset = .{ .pointer = target_sym_index, .offset = offset } };
}
-fn lowerDeclRefValue(func: *CodeGen, decl_index: InternPool.DeclIndex, offset: u32) InnerError!WValue {
+fn lowerNavRef(func: *CodeGen, nav_index: InternPool.Nav.Index, offset: u32) InnerError!WValue {
const pt = func.pt;
const mod = pt.zcu;
+ const ip = &mod.intern_pool;
- const decl = mod.declPtr(decl_index);
// check if decl is an alias to a function, in which case we
// want to lower the actual decl, rather than the alias itself.
- if (decl.val.getFunction(mod)) |func_val| {
- if (func_val.owner_decl != decl_index) {
- return func.lowerDeclRefValue(func_val.owner_decl, offset);
- }
- } else if (decl.val.getExternFunc(mod)) |func_val| {
- if (func_val.decl != decl_index) {
- return func.lowerDeclRefValue(func_val.decl, offset);
- }
- }
- const decl_ty = decl.typeOf(mod);
- if (decl_ty.zigTypeTag(mod) != .Fn and !decl_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+ const owner_nav = switch (ip.indexToKey(mod.navValue(nav_index).toIntern())) {
+ .func => |function| function.owner_nav,
+ .variable => |variable| variable.owner_nav,
+ .@"extern" => |@"extern"| @"extern".owner_nav,
+ else => nav_index,
+ };
+ const nav_ty = ip.getNav(owner_nav).typeOf(ip);
+ if (!ip.isFunctionType(nav_ty) and !Type.fromInterned(nav_ty).hasRuntimeBitsIgnoreComptime(pt)) {
return .{ .imm32 = 0xaaaaaaaa };
}
- const atom_index = try func.bin_file.getOrCreateAtomForDecl(pt, decl_index);
+ const atom_index = try func.bin_file.getOrCreateAtomForNav(pt, nav_index);
const atom = func.bin_file.getAtom(atom_index);
const target_sym_index = @intFromEnum(atom.sym_index);
- if (decl_ty.zigTypeTag(mod) == .Fn) {
+ if (ip.isFunctionType(nav_ty)) {
return .{ .function_index = target_sym_index };
} else if (offset == 0) {
return .{ .memory = target_sym_index };
@@ -3229,7 +3234,7 @@ fn lowerDeclRefValue(func: *CodeGen, decl_index: InternPool.DeclIndex, offset: u
fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue {
const pt = func.pt;
const mod = pt.zcu;
- assert(!isByRef(ty, pt));
+ assert(!isByRef(ty, pt, func.target.*));
const ip = &mod.intern_pool;
if (val.isUndefDeep(mod)) return func.emitUndefined(ty);
@@ -3268,7 +3273,7 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue {
} },
},
.variable,
- .extern_func,
+ .@"extern",
.func,
.enum_literal,
.empty_enum_value,
@@ -3325,16 +3330,12 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue {
.f64 => |f64_val| return .{ .float64 = f64_val },
else => unreachable,
},
- .slice => |slice| {
- var ptr = ip.indexToKey(slice.ptr).ptr;
- const owner_decl = while (true) switch (ptr.base_addr) {
- .decl => |decl| break decl,
- .int, .anon_decl => return func.fail("Wasm TODO: lower slice where ptr is not owned by decl", .{}),
- .opt_payload, .eu_payload => |base| ptr = ip.indexToKey(base).ptr,
- .field => |base_index| ptr = ip.indexToKey(base_index.base).ptr,
- .arr_elem, .comptime_field, .comptime_alloc => unreachable,
- };
- return .{ .memory = try func.bin_file.lowerUnnamedConst(pt, val, owner_decl) };
+ .slice => switch (try func.bin_file.lowerUav(pt, val.toIntern(), .none, func.src_loc)) {
+ .mcv => |mcv| return .{ .memory = mcv.load_symbol },
+ .fail => |err_msg| {
+ func.err_msg = err_msg;
+ return error.CodegenFail;
+ },
},
.ptr => return func.lowerPtr(val.toIntern(), 0),
.opt => if (ty.optionalReprIsPayload(mod)) {
@@ -3350,7 +3351,7 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue {
.aggregate => switch (ip.indexToKey(ty.ip_index)) {
.array_type => return func.fail("Wasm TODO: LowerConstant for {}", .{ty.fmt(pt)}),
.vector_type => {
- assert(determineSimdStoreStrategy(ty, pt) == .direct);
+ assert(determineSimdStoreStrategy(ty, pt, func.target.*) == .direct);
var buf: [16]u8 = undefined;
val.writeToMemory(ty, pt, &buf) catch unreachable;
return func.storeSimdImmd(buf);
@@ -3405,7 +3406,7 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue {
33...64 => return .{ .imm64 = 0xaaaaaaaaaaaaaaaa },
else => unreachable,
},
- .Float => switch (ty.floatBits(func.target)) {
+ .Float => switch (ty.floatBits(func.target.*)) {
16 => return .{ .imm32 = 0xaaaaaaaa },
32 => return .{ .float32 = @as(f32, @bitCast(@as(u32, 0xaaaaaaaa))) },
64 => return .{ .float64 = @as(f64, @bitCast(@as(u64, 0xaaaaaaaaaaaaaaaa))) },
@@ -3480,11 +3481,11 @@ fn airBlock(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn lowerBlock(func: *CodeGen, inst: Air.Inst.Index, block_ty: Type, body: []const Air.Inst.Index) InnerError!void {
const pt = func.pt;
- const wasm_block_ty = genBlockType(block_ty, pt);
+ const wasm_block_ty = genBlockType(block_ty, pt, func.target.*);
// if wasm_block_ty is non-empty, we create a register to store the temporary value
const block_result: WValue = if (wasm_block_ty != wasm.block_empty) blk: {
- const ty: Type = if (isByRef(block_ty, pt)) Type.u32 else block_ty;
+ const ty: Type = if (isByRef(block_ty, pt, func.target.*)) Type.u32 else block_ty;
break :blk try func.ensureAllocLocal(ty); // make sure it's a clean local as it may never get overwritten
} else .none;
@@ -3608,7 +3609,7 @@ fn cmp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareO
}
} else if (ty.isAnyFloat()) {
return func.cmpFloat(ty, lhs, rhs, op);
- } else if (isByRef(ty, pt)) {
+ } else if (isByRef(ty, pt, func.target.*)) {
return func.cmpBigInt(lhs, rhs, ty, op);
}
@@ -3626,7 +3627,7 @@ fn cmp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareO
try func.lowerToStack(rhs);
const opcode: wasm.Opcode = buildOpcode(.{
- .valtype1 = typeToValtype(ty, pt),
+ .valtype1 = typeToValtype(ty, pt, func.target.*),
.op = switch (op) {
.lt => .lt,
.lte => .le,
@@ -3645,7 +3646,7 @@ fn cmp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareO
/// Compares two floats.
/// NOTE: Leaves the result of the comparison on top of the stack.
fn cmpFloat(func: *CodeGen, ty: Type, lhs: WValue, rhs: WValue, cmp_op: std.math.CompareOperator) InnerError!WValue {
- const float_bits = ty.floatBits(func.target);
+ const float_bits = ty.floatBits(func.target.*);
const op: Op = switch (cmp_op) {
.lt => .lt,
@@ -3829,7 +3830,7 @@ fn airBitcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
break :result try func.bitcast(wanted_ty, given_ty, operand);
}
- if (isByRef(given_ty, pt) and !isByRef(wanted_ty, pt)) {
+ if (isByRef(given_ty, pt, func.target.*) and !isByRef(wanted_ty, pt, func.target.*)) {
const loaded_memory = try func.load(operand, wanted_ty, 0);
if (needs_wrapping) {
break :result try func.wrapOperand(loaded_memory, wanted_ty);
@@ -3837,7 +3838,7 @@ fn airBitcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
break :result loaded_memory;
}
}
- if (!isByRef(given_ty, pt) and isByRef(wanted_ty, pt)) {
+ if (!isByRef(given_ty, pt, func.target.*) and isByRef(wanted_ty, pt, func.target.*)) {
const stack_memory = try func.allocStack(wanted_ty);
try func.store(stack_memory, operand, given_ty, 0);
if (needs_wrapping) {
@@ -3867,8 +3868,8 @@ fn bitcast(func: *CodeGen, wanted_ty: Type, given_ty: Type, operand: WValue) Inn
const opcode = buildOpcode(.{
.op = .reinterpret,
- .valtype1 = typeToValtype(wanted_ty, pt),
- .valtype2 = typeToValtype(given_ty, pt),
+ .valtype1 = typeToValtype(wanted_ty, pt, func.target.*),
+ .valtype2 = typeToValtype(given_ty, pt, func.target.*),
});
try func.emitWValue(operand);
try func.addTag(Mir.Inst.Tag.fromOpcode(opcode));
@@ -3990,8 +3991,8 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
break :result try func.trunc(shifted_value, field_ty, backing_ty);
},
.Union => result: {
- if (isByRef(struct_ty, pt)) {
- if (!isByRef(field_ty, pt)) {
+ if (isByRef(struct_ty, pt, func.target.*)) {
+ if (!isByRef(field_ty, pt, func.target.*)) {
break :result try func.load(operand, field_ty, 0);
} else {
const new_stack_val = try func.allocStack(field_ty);
@@ -4017,7 +4018,7 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const offset = std.math.cast(u32, struct_ty.structFieldOffset(field_index, pt)) orelse {
return func.fail("Field type '{}' too big to fit into stack frame", .{field_ty.fmt(pt)});
};
- if (isByRef(field_ty, pt)) {
+ if (isByRef(field_ty, pt, func.target.*)) {
switch (operand) {
.stack_offset => |stack_offset| {
break :result .{ .stack_offset = .{ .value = stack_offset.value + offset, .references = 1 } };
@@ -4163,7 +4164,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const val = try func.lowerConstant(case.values[0].value, target_ty);
try func.emitWValue(val);
const opcode = buildOpcode(.{
- .valtype1 = typeToValtype(target_ty, pt),
+ .valtype1 = typeToValtype(target_ty, pt, func.target.*),
.op = .ne, // not equal, because we want to jump out of this block if it does not match the condition.
.signedness = signedness,
});
@@ -4177,7 +4178,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const val = try func.lowerConstant(value.value, target_ty);
try func.emitWValue(val);
const opcode = buildOpcode(.{
- .valtype1 = typeToValtype(target_ty, pt),
+ .valtype1 = typeToValtype(target_ty, pt, func.target.*),
.op = .eq,
.signedness = signedness,
});
@@ -4265,7 +4266,7 @@ fn airUnwrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: boo
}
const pl_offset = @as(u32, @intCast(errUnionPayloadOffset(payload_ty, pt)));
- if (op_is_ptr or isByRef(payload_ty, pt)) {
+ if (op_is_ptr or isByRef(payload_ty, pt, func.target.*)) {
break :result try func.buildPointerOffset(operand, pl_offset, .new);
}
@@ -4492,7 +4493,7 @@ fn airOptionalPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const operand = try func.resolveInst(ty_op.operand);
if (opt_ty.optionalReprIsPayload(mod)) break :result func.reuseOperand(ty_op.operand, operand);
- if (isByRef(payload_ty, pt)) {
+ if (isByRef(payload_ty, pt, func.target.*)) {
break :result try func.buildPointerOffset(operand, 0, .new);
}
@@ -4626,7 +4627,7 @@ fn airSliceElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.addTag(.i32_mul);
try func.addTag(.i32_add);
- const elem_result = if (isByRef(elem_ty, pt))
+ const elem_result = if (isByRef(elem_ty, pt, func.target.*))
.stack
else
try func.load(.stack, elem_ty, 0);
@@ -4784,7 +4785,7 @@ fn airPtrElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.addTag(.i32_mul);
try func.addTag(.i32_add);
- const elem_result = if (isByRef(elem_ty, pt))
+ const elem_result = if (isByRef(elem_ty, pt, func.target.*))
.stack
else
try func.load(.stack, elem_ty, 0);
@@ -4835,7 +4836,7 @@ fn airPtrBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
else => ptr_ty.childType(mod),
};
- const valtype = typeToValtype(Type.usize, pt);
+ const valtype = typeToValtype(Type.usize, pt, func.target.*);
const mul_opcode = buildOpcode(.{ .valtype1 = valtype, .op = .mul });
const bin_opcode = buildOpcode(.{ .valtype1 = valtype, .op = op });
@@ -4982,7 +4983,7 @@ fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const elem_ty = array_ty.childType(mod);
const elem_size = elem_ty.abiSize(pt);
- if (isByRef(array_ty, pt)) {
+ if (isByRef(array_ty, pt, func.target.*)) {
try func.lowerToStack(array);
try func.emitWValue(index);
try func.addImm32(@intCast(elem_size));
@@ -5025,7 +5026,7 @@ fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
}
- const elem_result = if (isByRef(elem_ty, pt))
+ const elem_result = if (isByRef(elem_ty, pt, func.target.*))
.stack
else
try func.load(.stack, elem_ty, 0);
@@ -5040,7 +5041,7 @@ fn airIntFromFloat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const operand = try func.resolveInst(ty_op.operand);
const op_ty = func.typeOf(ty_op.operand);
- const op_bits = op_ty.floatBits(func.target);
+ const op_bits = op_ty.floatBits(func.target.*);
const dest_ty = func.typeOfIndex(inst);
const dest_info = dest_ty.intInfo(mod);
@@ -5069,8 +5070,8 @@ fn airIntFromFloat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.emitWValue(operand);
const op = buildOpcode(.{
.op = .trunc,
- .valtype1 = typeToValtype(dest_ty, pt),
- .valtype2 = typeToValtype(op_ty, pt),
+ .valtype1 = typeToValtype(dest_ty, pt, func.target.*),
+ .valtype2 = typeToValtype(op_ty, pt, func.target.*),
.signedness = dest_info.signedness,
});
try func.addTag(Mir.Inst.Tag.fromOpcode(op));
@@ -5088,7 +5089,7 @@ fn airFloatFromInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const op_info = op_ty.intInfo(mod);
const dest_ty = func.typeOfIndex(inst);
- const dest_bits = dest_ty.floatBits(func.target);
+ const dest_bits = dest_ty.floatBits(func.target.*);
if (op_info.bits > 128) {
return func.fail("TODO: floatFromInt for integers/floats with bitsize {d} bits", .{op_info.bits});
@@ -5114,8 +5115,8 @@ fn airFloatFromInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.emitWValue(operand);
const op = buildOpcode(.{
.op = .convert,
- .valtype1 = typeToValtype(dest_ty, pt),
- .valtype2 = typeToValtype(op_ty, pt),
+ .valtype1 = typeToValtype(dest_ty, pt, func.target.*),
+ .valtype2 = typeToValtype(op_ty, pt, func.target.*),
.signedness = op_info.signedness,
});
try func.addTag(Mir.Inst.Tag.fromOpcode(op));
@@ -5131,7 +5132,7 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty = func.typeOfIndex(inst);
const elem_ty = ty.childType(mod);
- if (determineSimdStoreStrategy(ty, pt) == .direct) blk: {
+ if (determineSimdStoreStrategy(ty, pt, func.target.*) == .direct) blk: {
switch (operand) {
// when the operand lives in the linear memory section, we can directly
// load and splat the value at once. Meaning we do not first have to load
@@ -5215,7 +5216,7 @@ fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const elem_size = child_ty.abiSize(pt);
// TODO: One of them could be by ref; handle in loop
- if (isByRef(func.typeOf(extra.a), pt) or isByRef(inst_ty, pt)) {
+ if (isByRef(func.typeOf(extra.a), pt, func.target.*) or isByRef(inst_ty, pt, func.target.*)) {
const result = try func.allocStack(inst_ty);
for (0..mask_len) |index| {
@@ -5291,7 +5292,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// When the element type is by reference, we must copy the entire
// value. It is therefore safer to move the offset pointer and store
// each value individually, instead of using store offsets.
- if (isByRef(elem_ty, pt)) {
+ if (isByRef(elem_ty, pt, func.target.*)) {
// copy stack pointer into a temporary local, which is
// moved for each element to store each value in the right position.
const offset = try func.buildPointerOffset(result, 0, .new);
@@ -5321,7 +5322,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
},
.Struct => switch (result_ty.containerLayout(mod)) {
.@"packed" => {
- if (isByRef(result_ty, pt)) {
+ if (isByRef(result_ty, pt, func.target.*)) {
return func.fail("TODO: airAggregateInit for packed structs larger than 64 bits", .{});
}
const packed_struct = mod.typeToPackedStruct(result_ty).?;
@@ -5424,15 +5425,15 @@ fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
if (layout.tag_size == 0) {
break :result .none;
}
- assert(!isByRef(union_ty, pt));
+ assert(!isByRef(union_ty, pt, func.target.*));
break :result tag_int;
}
- if (isByRef(union_ty, pt)) {
+ if (isByRef(union_ty, pt, func.target.*)) {
const result_ptr = try func.allocStack(union_ty);
const payload = try func.resolveInst(extra.init);
if (layout.tag_align.compare(.gte, layout.payload_align)) {
- if (isByRef(field_ty, pt)) {
+ if (isByRef(field_ty, pt, func.target.*)) {
const payload_ptr = try func.buildPointerOffset(result_ptr, layout.tag_size, .new);
try func.store(payload_ptr, payload, field_ty, 0);
} else {
@@ -5513,7 +5514,7 @@ fn cmpOptionals(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op:
_ = try func.load(lhs, payload_ty, 0);
_ = try func.load(rhs, payload_ty, 0);
- const opcode = buildOpcode(.{ .op = .ne, .valtype1 = typeToValtype(payload_ty, pt) });
+ const opcode = buildOpcode(.{ .op = .ne, .valtype1 = typeToValtype(payload_ty, pt, func.target.*) });
try func.addTag(Mir.Inst.Tag.fromOpcode(opcode));
try func.addLabel(.br_if, 0);
@@ -5630,8 +5631,8 @@ fn airFpext(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
/// Extends a float from a given `Type` to a larger wanted `Type`
/// NOTE: Leaves the result on the stack
fn fpext(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError!WValue {
- const given_bits = given.floatBits(func.target);
- const wanted_bits = wanted.floatBits(func.target);
+ const given_bits = given.floatBits(func.target.*);
+ const wanted_bits = wanted.floatBits(func.target.*);
if (wanted_bits == 64 and given_bits == 32) {
try func.emitWValue(operand);
@@ -5674,8 +5675,8 @@ fn airFptrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
/// Truncates a float from a given `Type` to its wanted `Type`
/// NOTE: The result value remains on the stack
fn fptrunc(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError!WValue {
- const given_bits = given.floatBits(func.target);
- const wanted_bits = wanted.floatBits(func.target);
+ const given_bits = given.floatBits(func.target.*);
+ const wanted_bits = wanted.floatBits(func.target.*);
if (wanted_bits == 32 and given_bits == 64) {
try func.emitWValue(operand);
@@ -6247,7 +6248,6 @@ fn airMaxMin(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
assert(op == .max or op == .min);
const pt = func.pt;
const mod = pt.zcu;
- const target = mod.getTarget();
const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const ty = func.typeOfIndex(inst);
@@ -6264,7 +6264,7 @@ fn airMaxMin(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
if (ty.zigTypeTag(mod) == .Float) {
var fn_name_buf: [64]u8 = undefined;
- const float_bits = ty.floatBits(target);
+ const float_bits = ty.floatBits(func.target.*);
const fn_name = std.fmt.bufPrint(&fn_name_buf, "{s}f{s}{s}", .{
target_util.libcFloatPrefix(float_bits),
@tagName(op),
@@ -6300,7 +6300,7 @@ fn airMulAdd(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const lhs = try func.resolveInst(bin_op.lhs);
const rhs = try func.resolveInst(bin_op.rhs);
- const result = if (ty.floatBits(func.target) == 16) fl_result: {
+ const result = if (ty.floatBits(func.target.*) == 16) fl_result: {
const rhs_ext = try func.fpext(rhs, ty, Type.f32);
const lhs_ext = try func.fpext(lhs, ty, Type.f32);
const addend_ext = try func.fpext(addend, ty, Type.f32);
@@ -6457,8 +6457,6 @@ fn airDbgInlineBlock(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airDbgVar(func: *CodeGen, inst: Air.Inst.Index, is_ptr: bool) InnerError!void {
if (func.debug_output != .dwarf) return func.finishAir(inst, .none, &.{});
- const pt = func.pt;
- const mod = pt.zcu;
const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const ty = func.typeOf(pl_op.operand);
const operand = try func.resolveInst(pl_op.operand);
@@ -6468,14 +6466,14 @@ fn airDbgVar(func: *CodeGen, inst: Air.Inst.Index, is_ptr: bool) InnerError!void
const name = func.air.nullTerminatedString(pl_op.payload);
log.debug(" var name = ({s})", .{name});
- const loc: link.File.Dwarf.DeclState.DbgInfoLoc = switch (operand) {
+ const loc: link.File.Dwarf.NavState.DbgInfoLoc = switch (operand) {
.local => |local| .{ .wasm_local = local.value },
else => blk: {
log.debug("TODO generate debug info for {}", .{operand});
break :blk .nop;
},
};
- try func.debug_output.dwarf.genVarDbgInfo(name, ty, mod.funcOwnerDeclIndex(func.func_index), is_ptr, loc);
+ try func.debug_output.dwarf.genVarDbgInfo(name, ty, func.owner_nav, is_ptr, loc);
return func.finishAir(inst, .none, &.{});
}
@@ -6552,7 +6550,7 @@ fn lowerTry(
}
const pl_offset: u32 = @intCast(errUnionPayloadOffset(pl_ty, pt));
- if (isByRef(pl_ty, pt)) {
+ if (isByRef(pl_ty, pt, func.target.*)) {
return buildPointerOffset(func, err_union, pl_offset, .new);
}
const payload = try func.load(err_union, pl_ty, pl_offset);
@@ -6712,7 +6710,7 @@ fn airDivFloor(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
_ = try func.wrapOperand(.stack, ty);
}
} else {
- const float_bits = ty.floatBits(func.target);
+ const float_bits = ty.floatBits(func.target.*);
if (float_bits > 64) {
return func.fail("TODO: `@divFloor` for floats with bitsize: {d}", .{float_bits});
}
@@ -7126,12 +7124,12 @@ fn callIntrinsic(
// Always pass over C-ABI
const pt = func.pt;
const mod = pt.zcu;
- var func_type = try genFunctype(func.gpa, .C, param_types, return_type, pt);
+ var func_type = try genFunctype(func.gpa, .C, param_types, return_type, pt, func.target.*);
defer func_type.deinit(func.gpa);
const func_type_index = try func.bin_file.zigObjectPtr().?.putOrGetFuncType(func.gpa, func_type);
try func.bin_file.addOrUpdateImport(name, symbol_index, null, func_type_index);
- const want_sret_param = firstParamSRet(.C, return_type, pt);
+ const want_sret_param = firstParamSRet(.C, return_type, pt, func.target.*);
// if we want return as first param, we allocate a pointer to stack,
// and emit it as our first argument
const sret = if (want_sret_param) blk: {
@@ -7181,14 +7179,12 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
const pt = func.pt;
const mod = pt.zcu;
const ip = &mod.intern_pool;
- const enum_decl_index = enum_ty.getOwnerDecl(mod);
var arena_allocator = std.heap.ArenaAllocator.init(func.gpa);
defer arena_allocator.deinit();
const arena = arena_allocator.allocator();
- const decl = mod.declPtr(enum_decl_index);
- const func_name = try std.fmt.allocPrintZ(arena, "__zig_tag_name_{}", .{decl.fqn.fmt(ip)});
+ const func_name = try std.fmt.allocPrintZ(arena, "__zig_tag_name_{}", .{ip.loadEnumType(enum_ty.toIntern()).name.fmt(ip)});
// check if we already generated code for this.
if (func.bin_file.findGlobalSymbol(func_name)) |loc| {
@@ -7232,11 +7228,13 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
.ty = name_ty.toIntern(),
.storage = .{ .bytes = tag_name.toString() },
} });
- const tag_sym_index = try func.bin_file.lowerUnnamedConst(
- pt,
- Value.fromInterned(name_val),
- enum_decl_index,
- );
+ const tag_sym_index = switch (try func.bin_file.lowerUav(pt, name_val, .none, func.src_loc)) {
+ .mcv => |mcv| mcv.load_symbol,
+ .fail => |err_msg| {
+ func.err_msg = err_msg;
+ return error.CodegenFail;
+ },
+ };
// block for this if case
try writer.writeByte(std.wasm.opcode(.block));
@@ -7333,7 +7331,7 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
try writer.writeByte(std.wasm.opcode(.end));
const slice_ty = Type.slice_const_u8_sentinel_0;
- const func_type = try genFunctype(arena, .Unspecified, &.{int_tag_ty.ip_index}, slice_ty, pt);
+ const func_type = try genFunctype(arena, .Unspecified, &.{int_tag_ty.ip_index}, slice_ty, pt, func.target.*);
const sym_index = try func.bin_file.createFunction(func_name, func_type, &body_list, &relocs);
return @intFromEnum(sym_index);
}
@@ -7477,7 +7475,7 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
break :val ptr_val;
};
- const result = if (isByRef(result_ty, pt)) val: {
+ const result = if (isByRef(result_ty, pt, func.target.*)) val: {
try func.emitWValue(cmp_result);
try func.addImm32(~@as(u32, 0));
try func.addTag(.i32_xor);
@@ -7706,8 +7704,7 @@ fn airFence(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// Only when the atomic feature is enabled, and we're not building
// for a single-threaded build, can we emit the `fence` instruction.
// In all other cases, we emit no instructions for a fence.
- const func_namespace = zcu.namespacePtr(func.decl.src_namespace);
- const single_threaded = func_namespace.fileScope(zcu).mod.single_threaded;
+ const single_threaded = zcu.navFileScope(func.owner_nav).mod.single_threaded;
if (func.useAtomicFeature() and !single_threaded) {
try func.addAtomicTag(.atomic_fence);
}