about summary refs log tree commit diff
path: root/src/codegen
diff options
context:
space:
mode:
author: mlugg <mlugg@mlugg.co.uk> 2024-07-28 17:09:14 +0100
committer: mlugg <mlugg@mlugg.co.uk> 2024-08-11 07:29:41 +0100
commit: 548a087fafeda5b07d2237d5137906b8d07da699 (patch)
tree: 69135f129b84ab5b65f443d0a52899b232696e2b /src/codegen
parent: 531cd177e89c1edfcd2e52f74f220eb186a25f78 (diff)
downloadzig-548a087fafeda5b07d2237d5137906b8d07da699.tar.gz
zig-548a087fafeda5b07d2237d5137906b8d07da699.zip
compiler: split Decl into Nav and Cau
The type `Zcu.Decl` in the compiler is problematic: over time it has gained many responsibilities. Every source declaration, container type, generic instantiation, and `@extern` has a `Decl`. The functions of these `Decl`s are in some cases entirely disjoint. After careful analysis, I determined that the two main responsibilities of `Decl` are as follows: * A `Decl` acts as the "subject" of semantic analysis at comptime. A single unit of analysis is either a runtime function body, or a `Decl`. It registers incremental dependencies, tracks analysis errors, etc. * A `Decl` acts as a "global variable": a pointer to it is consistent, and it may be lowered to a specific symbol by the codegen backend. This commit eliminates `Decl` and introduces new types to model these responsibilities: `Cau` (Comptime Analysis Unit) and `Nav` (Named Addressable Value). Every source declaration, and every container type requiring resolution (so *not* including `opaque`), has a `Cau`. For a source declaration, this `Cau` performs the resolution of its value. (When #131 is implemented, it is undecided whether type and value resolution will share a `Cau` or have two distinct `Cau`s.) For a type, this `Cau` is the context in which type resolution occurs. Every non-`comptime` source declaration, every generic instantiation, and every distinct `extern` has a `Nav`. These are sent to codegen/link: the backends by definition do not care about `Cau`s. This commit has some minor technically-breaking changes surrounding `usingnamespace`. I don't think they'll impact anyone, since the changes are fixes around semantics which were previously inconsistent (the behavior changed depending on hashmap iteration order!). Aside from that, this changeset has no significant user-facing changes. Instead, it is an internal refactor which makes it easier to correctly model the responsibilities of different objects, particularly regarding incremental compilation.
The performance impact should be negligible, but I will take measurements before merging this work into `master`. Co-authored-by: Jacob Young <jacobly0@users.noreply.github.com> Co-authored-by: Jakub Konka <kubkon@jakubkonka.com>
Diffstat (limited to 'src/codegen')
-rw-r--r--  src/codegen/c.zig      480
-rw-r--r--  src/codegen/c/Type.zig  73
-rw-r--r--  src/codegen/llvm.zig   848
-rw-r--r--  src/codegen/spirv.zig  586
4 files changed, 983 insertions, 1004 deletions
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index 4dda4d083b..03a1ea3746 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -38,8 +38,8 @@ pub const CValue = union(enum) {
/// Index into a tuple's fields
field: usize,
/// By-value
- decl: InternPool.DeclIndex,
- decl_ref: InternPool.DeclIndex,
+ nav: InternPool.Nav.Index,
+ nav_ref: InternPool.Nav.Index,
/// An undefined value (cannot be dereferenced)
undef: Type,
/// Rendered as an identifier (using fmtIdent)
@@ -58,19 +58,12 @@ const BlockData = struct {
pub const CValueMap = std.AutoHashMap(Air.Inst.Ref, CValue);
pub const LazyFnKey = union(enum) {
- tag_name: InternPool.DeclIndex,
- never_tail: InternPool.DeclIndex,
- never_inline: InternPool.DeclIndex,
+ tag_name: InternPool.Index,
+ never_tail: InternPool.Nav.Index,
+ never_inline: InternPool.Nav.Index,
};
pub const LazyFnValue = struct {
fn_name: CType.Pool.String,
- data: Data,
-
- const Data = union {
- tag_name: Type,
- never_tail: void,
- never_inline: void,
- };
};
pub const LazyFnMap = std.AutoArrayHashMapUnmanaged(LazyFnKey, LazyFnValue);
@@ -498,10 +491,11 @@ pub const Function = struct {
return f.object.dg.fmtIntLiteral(val, .Other);
}
- fn getLazyFnName(f: *Function, key: LazyFnKey, data: LazyFnValue.Data) ![]const u8 {
+ fn getLazyFnName(f: *Function, key: LazyFnKey) ![]const u8 {
const gpa = f.object.dg.gpa;
const pt = f.object.dg.pt;
const zcu = pt.zcu;
+ const ip = &zcu.intern_pool;
const ctype_pool = &f.object.dg.ctype_pool;
const gop = try f.lazy_fns.getOrPut(gpa, key);
@@ -511,19 +505,19 @@ pub const Function = struct {
gop.value_ptr.* = .{
.fn_name = switch (key) {
.tag_name,
+ => |enum_ty| try ctype_pool.fmt(gpa, "zig_{s}_{}__{d}", .{
+ @tagName(key),
+ fmtIdent(ip.loadEnumType(enum_ty).name.toSlice(ip)),
+ @intFromEnum(enum_ty),
+ }),
.never_tail,
.never_inline,
- => |owner_decl| try ctype_pool.fmt(gpa, "zig_{s}_{}__{d}", .{
+ => |owner_nav| try ctype_pool.fmt(gpa, "zig_{s}_{}__{d}", .{
@tagName(key),
- fmtIdent(zcu.declPtr(owner_decl).name.toSlice(&zcu.intern_pool)),
- @intFromEnum(owner_decl),
+ fmtIdent(ip.getNav(owner_nav).name.toSlice(ip)),
+ @intFromEnum(owner_nav),
}),
},
- .data = switch (key) {
- .tag_name => .{ .tag_name = data.tag_name },
- .never_tail => .{ .never_tail = data.never_tail },
- .never_inline => .{ .never_inline = data.never_inline },
- },
};
}
return gop.value_ptr.fn_name.toSlice(ctype_pool).?;
@@ -618,12 +612,12 @@ pub const DeclGen = struct {
scratch: std.ArrayListUnmanaged(u32),
/// Keeps track of anonymous decls that need to be rendered before this
/// (named) Decl in the output C code.
- anon_decl_deps: std.AutoArrayHashMapUnmanaged(InternPool.Index, C.DeclBlock),
- aligned_anon_decls: std.AutoArrayHashMapUnmanaged(InternPool.Index, Alignment),
+ uav_deps: std.AutoArrayHashMapUnmanaged(InternPool.Index, C.AvBlock),
+ aligned_uavs: std.AutoArrayHashMapUnmanaged(InternPool.Index, Alignment),
pub const Pass = union(enum) {
- decl: InternPool.DeclIndex,
- anon: InternPool.Index,
+ nav: InternPool.Nav.Index,
+ uav: InternPool.Index,
flush,
};
@@ -634,39 +628,37 @@ pub const DeclGen = struct {
fn fail(dg: *DeclGen, comptime format: []const u8, args: anytype) error{ AnalysisFail, OutOfMemory } {
@setCold(true);
const zcu = dg.pt.zcu;
- const decl_index = dg.pass.decl;
- const decl = zcu.declPtr(decl_index);
- const src_loc = decl.navSrcLoc(zcu);
+ const src_loc = zcu.navSrcLoc(dg.pass.nav);
dg.error_msg = try Zcu.ErrorMsg.create(dg.gpa, src_loc, format, args);
return error.AnalysisFail;
}
- fn renderAnonDeclValue(
+ fn renderUav(
dg: *DeclGen,
writer: anytype,
- anon_decl: InternPool.Key.Ptr.BaseAddr.AnonDecl,
+ uav: InternPool.Key.Ptr.BaseAddr.Uav,
location: ValueRenderLocation,
) error{ OutOfMemory, AnalysisFail }!void {
const pt = dg.pt;
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const ctype_pool = &dg.ctype_pool;
- const decl_val = Value.fromInterned(anon_decl.val);
- const decl_ty = decl_val.typeOf(zcu);
+ const uav_val = Value.fromInterned(uav.val);
+ const uav_ty = uav_val.typeOf(zcu);
// Render an undefined pointer if we have a pointer to a zero-bit or comptime type.
- const ptr_ty = Type.fromInterned(anon_decl.orig_ty);
- if (ptr_ty.isPtrAtRuntime(zcu) and !decl_ty.isFnOrHasRuntimeBits(pt)) {
+ const ptr_ty = Type.fromInterned(uav.orig_ty);
+ if (ptr_ty.isPtrAtRuntime(zcu) and !uav_ty.isFnOrHasRuntimeBits(pt)) {
return dg.writeCValue(writer, .{ .undef = ptr_ty });
}
// Chase function values in order to be able to reference the original function.
- if (decl_val.getFunction(zcu)) |func|
- return dg.renderDeclValue(writer, func.owner_decl, location);
- if (decl_val.getExternFunc(zcu)) |extern_func|
- return dg.renderDeclValue(writer, extern_func.decl, location);
-
- assert(decl_val.getVariable(zcu) == null);
+ switch (ip.indexToKey(uav.val)) {
+ .variable => unreachable,
+ .func => |func| return dg.renderNav(writer, func.owner_nav, location),
+ .@"extern" => |@"extern"| return dg.renderNav(writer, @"extern".owner_nav, location),
+ else => {},
+ }
// We shouldn't cast C function pointers as this is UB (when you call
// them). The analysis until now should ensure that the C function
@@ -674,22 +666,22 @@ pub const DeclGen = struct {
// somewhere and we should let the C compiler tell us about it.
const ptr_ctype = try dg.ctypeFromType(ptr_ty, .complete);
const elem_ctype = ptr_ctype.info(ctype_pool).pointer.elem_ctype;
- const decl_ctype = try dg.ctypeFromType(decl_ty, .complete);
- const need_cast = !elem_ctype.eql(decl_ctype) and
- (elem_ctype.info(ctype_pool) != .function or decl_ctype.info(ctype_pool) != .function);
+ const uav_ctype = try dg.ctypeFromType(uav_ty, .complete);
+ const need_cast = !elem_ctype.eql(uav_ctype) and
+ (elem_ctype.info(ctype_pool) != .function or uav_ctype.info(ctype_pool) != .function);
if (need_cast) {
try writer.writeAll("((");
try dg.renderCType(writer, ptr_ctype);
try writer.writeByte(')');
}
try writer.writeByte('&');
- try renderAnonDeclName(writer, decl_val);
+ try renderUavName(writer, uav_val);
if (need_cast) try writer.writeByte(')');
// Indicate that the anon decl should be rendered to the output so that
// our reference above is not undefined.
- const ptr_type = ip.indexToKey(anon_decl.orig_ty).ptr_type;
- const gop = try dg.anon_decl_deps.getOrPut(dg.gpa, anon_decl.val);
+ const ptr_type = ip.indexToKey(uav.orig_ty).ptr_type;
+ const gop = try dg.uav_deps.getOrPut(dg.gpa, uav.val);
if (!gop.found_existing) gop.value_ptr.* = .{};
// Only insert an alignment entry if the alignment is greater than ABI
@@ -698,7 +690,7 @@ pub const DeclGen = struct {
if (explicit_alignment != .none) {
const abi_alignment = Type.fromInterned(ptr_type.child).abiAlignment(pt);
if (explicit_alignment.order(abi_alignment).compare(.gt)) {
- const aligned_gop = try dg.aligned_anon_decls.getOrPut(dg.gpa, anon_decl.val);
+ const aligned_gop = try dg.aligned_uavs.getOrPut(dg.gpa, uav.val);
aligned_gop.value_ptr.* = if (aligned_gop.found_existing)
aligned_gop.value_ptr.maxStrict(explicit_alignment)
else
@@ -707,47 +699,49 @@ pub const DeclGen = struct {
}
}
- fn renderDeclValue(
+ fn renderNav(
dg: *DeclGen,
writer: anytype,
- decl_index: InternPool.DeclIndex,
+ nav_index: InternPool.Nav.Index,
location: ValueRenderLocation,
) error{ OutOfMemory, AnalysisFail }!void {
+ _ = location;
const pt = dg.pt;
const zcu = pt.zcu;
+ const ip = &zcu.intern_pool;
const ctype_pool = &dg.ctype_pool;
- const decl = zcu.declPtr(decl_index);
- assert(decl.has_tv);
+
+ // Chase function values in order to be able to reference the original function.
+ const owner_nav = switch (ip.indexToKey(zcu.navValue(nav_index).toIntern())) {
+ .variable => |variable| variable.owner_nav,
+ .func => |func| func.owner_nav,
+ .@"extern" => |@"extern"| @"extern".owner_nav,
+ else => nav_index,
+ };
// Render an undefined pointer if we have a pointer to a zero-bit or comptime type.
- const decl_ty = decl.typeOf(zcu);
- const ptr_ty = try decl.declPtrType(pt);
- if (!decl_ty.isFnOrHasRuntimeBits(pt)) {
+ const nav_ty = Type.fromInterned(ip.getNav(owner_nav).typeOf(ip));
+ const ptr_ty = try pt.navPtrType(owner_nav);
+ if (!nav_ty.isFnOrHasRuntimeBits(pt)) {
return dg.writeCValue(writer, .{ .undef = ptr_ty });
}
- // Chase function values in order to be able to reference the original function.
- if (decl.val.getFunction(zcu)) |func| if (func.owner_decl != decl_index)
- return dg.renderDeclValue(writer, func.owner_decl, location);
- if (decl.val.getExternFunc(zcu)) |extern_func| if (extern_func.decl != decl_index)
- return dg.renderDeclValue(writer, extern_func.decl, location);
-
// We shouldn't cast C function pointers as this is UB (when you call
// them). The analysis until now should ensure that the C function
// pointers are compatible. If they are not, then there is a bug
// somewhere and we should let the C compiler tell us about it.
const ctype = try dg.ctypeFromType(ptr_ty, .complete);
const elem_ctype = ctype.info(ctype_pool).pointer.elem_ctype;
- const decl_ctype = try dg.ctypeFromType(decl_ty, .complete);
- const need_cast = !elem_ctype.eql(decl_ctype) and
- (elem_ctype.info(ctype_pool) != .function or decl_ctype.info(ctype_pool) != .function);
+ const nav_ctype = try dg.ctypeFromType(nav_ty, .complete);
+ const need_cast = !elem_ctype.eql(nav_ctype) and
+ (elem_ctype.info(ctype_pool) != .function or nav_ctype.info(ctype_pool) != .function);
if (need_cast) {
try writer.writeAll("((");
try dg.renderCType(writer, ctype);
try writer.writeByte(')');
}
try writer.writeByte('&');
- try dg.renderDeclName(writer, decl_index);
+ try dg.renderNavName(writer, owner_nav);
if (need_cast) try writer.writeByte(')');
}
@@ -769,8 +763,8 @@ pub const DeclGen = struct {
try writer.print("){x}", .{try dg.fmtIntLiteral(addr_val, .Other)});
},
- .decl_ptr => |decl| try dg.renderDeclValue(writer, decl, location),
- .anon_decl_ptr => |ad| try dg.renderAnonDeclValue(writer, ad, location),
+ .nav_ptr => |nav| try dg.renderNav(writer, nav, location),
+ .uav_ptr => |uav| try dg.renderUav(writer, uav, location),
inline .eu_payload_ptr, .opt_payload_ptr => |info| {
try writer.writeAll("&(");
@@ -918,7 +912,7 @@ pub const DeclGen = struct {
.true => try writer.writeAll("true"),
},
.variable,
- .extern_func,
+ .@"extern",
.func,
.enum_literal,
.empty_enum_value,
@@ -1743,7 +1737,7 @@ pub const DeclGen = struct {
.undef,
.simple_value,
.variable,
- .extern_func,
+ .@"extern",
.func,
.int,
.err,
@@ -1758,7 +1752,7 @@ pub const DeclGen = struct {
.aggregate,
.un,
.memoized_call,
- => unreachable,
+ => unreachable, // values, not types
},
}
}
@@ -1770,7 +1764,7 @@ pub const DeclGen = struct {
fn_align: InternPool.Alignment,
kind: CType.Kind,
name: union(enum) {
- decl: InternPool.DeclIndex,
+ nav: InternPool.Nav.Index,
fmt_ctype_pool_string: std.fmt.Formatter(formatCTypePoolString),
@"export": struct {
main_name: InternPool.NullTerminatedString,
@@ -1805,7 +1799,7 @@ pub const DeclGen = struct {
try w.print("{}", .{trailing});
switch (name) {
- .decl => |decl_index| try dg.renderDeclName(w, decl_index),
+ .nav => |nav| try dg.renderNavName(w, nav),
.fmt_ctype_pool_string => |fmt| try w.print("{ }", .{fmt}),
.@"export" => |@"export"| try w.print("{ }", .{fmtIdent(@"export".extern_name.toSlice(ip))}),
}
@@ -1828,7 +1822,7 @@ pub const DeclGen = struct {
.forward => {
if (fn_align.toByteUnits()) |a| try w.print(" zig_align_fn({})", .{a});
switch (name) {
- .decl, .fmt_ctype_pool_string => {},
+ .nav, .fmt_ctype_pool_string => {},
.@"export" => |@"export"| {
const extern_name = @"export".extern_name.toSlice(ip);
const is_mangled = isMangledIdent(extern_name, true);
@@ -2069,8 +2063,8 @@ pub const DeclGen = struct {
fn writeName(dg: *DeclGen, w: anytype, c_value: CValue) !void {
switch (c_value) {
.new_local, .local => |i| try w.print("t{d}", .{i}),
- .constant => |val| try renderAnonDeclName(w, val),
- .decl => |decl| try dg.renderDeclName(w, decl),
+ .constant => |uav| try renderUavName(w, uav),
+ .nav => |nav| try dg.renderNavName(w, nav),
.identifier => |ident| try w.print("{ }", .{fmtIdent(ident)}),
else => unreachable,
}
@@ -2079,13 +2073,13 @@ pub const DeclGen = struct {
fn writeCValue(dg: *DeclGen, w: anytype, c_value: CValue) !void {
switch (c_value) {
.none, .new_local, .local, .local_ref => unreachable,
- .constant => |val| try renderAnonDeclName(w, val),
+ .constant => |uav| try renderUavName(w, uav),
.arg, .arg_array => unreachable,
.field => |i| try w.print("f{d}", .{i}),
- .decl => |decl| try dg.renderDeclName(w, decl),
- .decl_ref => |decl| {
+ .nav => |nav| try dg.renderNavName(w, nav),
+ .nav_ref => |nav| {
try w.writeByte('&');
- try dg.renderDeclName(w, decl);
+ try dg.renderNavName(w, nav);
},
.undef => |ty| try dg.renderUndefValue(w, ty, .Other),
.identifier => |ident| try w.print("{ }", .{fmtIdent(ident)}),
@@ -2111,12 +2105,12 @@ pub const DeclGen = struct {
.ctype_pool_string,
=> unreachable,
.field => |i| try w.print("f{d}", .{i}),
- .decl => |decl| {
+ .nav => |nav| {
try w.writeAll("(*");
- try dg.renderDeclName(w, decl);
+ try dg.renderNavName(w, nav);
try w.writeByte(')');
},
- .decl_ref => |decl| try dg.renderDeclName(w, decl),
+ .nav_ref => |nav| try dg.renderNavName(w, nav),
.undef => unreachable,
.identifier => |ident| try w.print("(*{ })", .{fmtIdent(ident)}),
.payload_identifier => |ident| try w.print("(*{ }.{ })", .{
@@ -2150,11 +2144,11 @@ pub const DeclGen = struct {
.arg_array,
.ctype_pool_string,
=> unreachable,
- .decl, .identifier, .payload_identifier => {
+ .nav, .identifier, .payload_identifier => {
try dg.writeCValue(writer, c_value);
try writer.writeAll("->");
},
- .decl_ref => {
+ .nav_ref => {
try dg.writeCValueDeref(writer, c_value);
try writer.writeByte('.');
},
@@ -2164,46 +2158,53 @@ pub const DeclGen = struct {
fn renderFwdDecl(
dg: *DeclGen,
- decl_index: InternPool.DeclIndex,
- variable: InternPool.Key.Variable,
+ nav_index: InternPool.Nav.Index,
+ flags: struct {
+ is_extern: bool,
+ is_const: bool,
+ is_threadlocal: bool,
+ is_weak_linkage: bool,
+ },
) !void {
const zcu = dg.pt.zcu;
- const decl = zcu.declPtr(decl_index);
+ const ip = &zcu.intern_pool;
+ const nav = ip.getNav(nav_index);
const fwd = dg.fwdDeclWriter();
- try fwd.writeAll(if (variable.is_extern) "zig_extern " else "static ");
- if (variable.is_weak_linkage) try fwd.writeAll("zig_weak_linkage ");
- if (variable.is_threadlocal and !dg.mod.single_threaded) try fwd.writeAll("zig_threadlocal ");
+ try fwd.writeAll(if (flags.is_extern) "zig_extern " else "static ");
+ if (flags.is_weak_linkage) try fwd.writeAll("zig_weak_linkage ");
+ if (flags.is_threadlocal and !dg.mod.single_threaded) try fwd.writeAll("zig_threadlocal ");
try dg.renderTypeAndName(
fwd,
- decl.typeOf(zcu),
- .{ .decl = decl_index },
- CQualifiers.init(.{ .@"const" = variable.is_const }),
- decl.alignment,
+ Type.fromInterned(nav.typeOf(ip)),
+ .{ .nav = nav_index },
+ CQualifiers.init(.{ .@"const" = flags.is_const }),
+ nav.status.resolved.alignment,
.complete,
);
try fwd.writeAll(";\n");
}
- fn renderDeclName(dg: *DeclGen, writer: anytype, decl_index: InternPool.DeclIndex) !void {
+ fn renderNavName(dg: *DeclGen, writer: anytype, nav_index: InternPool.Nav.Index) !void {
const zcu = dg.pt.zcu;
const ip = &zcu.intern_pool;
- const decl = zcu.declPtr(decl_index);
-
- if (decl.getExternDecl(zcu).unwrap()) |extern_decl_index| try writer.print("{ }", .{
- fmtIdent(zcu.declPtr(extern_decl_index).name.toSlice(ip)),
- }) else {
- // MSVC has a limit of 4095 character token length limit, and fmtIdent can (worst case),
- // expand to 3x the length of its input, but let's cut it off at a much shorter limit.
- const fqn_slice = decl.fqn.toSlice(ip);
- try writer.print("{}__{d}", .{
- fmtIdent(fqn_slice[0..@min(fqn_slice.len, 100)]),
- @intFromEnum(decl_index),
- });
+ switch (ip.indexToKey(zcu.navValue(nav_index).toIntern())) {
+ .@"extern" => |@"extern"| try writer.print("{ }", .{
+ fmtIdent(ip.getNav(@"extern".owner_nav).name.toSlice(ip)),
+ }),
+ else => {
+ // MSVC has a limit of 4095 character token length limit, and fmtIdent can (worst case),
+ // expand to 3x the length of its input, but let's cut it off at a much shorter limit.
+ const fqn_slice = ip.getNav(nav_index).fqn.toSlice(ip);
+ try writer.print("{}__{d}", .{
+ fmtIdent(fqn_slice[0..@min(fqn_slice.len, 100)]),
+ @intFromEnum(nav_index),
+ });
+ },
}
}
- fn renderAnonDeclName(writer: anytype, anon_decl_val: Value) !void {
- try writer.print("__anon_{d}", .{@intFromEnum(anon_decl_val.toIntern())});
+ fn renderUavName(writer: anytype, uav: Value) !void {
+ try writer.print("__anon_{d}", .{@intFromEnum(uav.toIntern())});
}
fn renderTypeForBuiltinFnName(dg: *DeclGen, writer: anytype, ty: Type) !void {
@@ -2301,12 +2302,13 @@ fn renderFwdDeclTypeName(
fwd_decl: CType.Info.FwdDecl,
attributes: []const u8,
) !void {
+ const ip = &zcu.intern_pool;
try w.print("{s} {s}", .{ @tagName(fwd_decl.tag), attributes });
switch (fwd_decl.name) {
.anon => try w.print("anon__lazy_{d}", .{@intFromEnum(ctype.index)}),
- .owner_decl => |owner_decl| try w.print("{}__{d}", .{
- fmtIdent(zcu.declPtr(owner_decl).name.toSlice(&zcu.intern_pool)),
- @intFromEnum(owner_decl),
+ .index => |index| try w.print("{}__{d}", .{
+ fmtIdent(Type.fromInterned(index).containerTypeName(ip).toSlice(&zcu.intern_pool)),
+ @intFromEnum(index),
}),
}
}
@@ -2340,11 +2342,11 @@ fn renderTypePrefix(
},
.aligned => switch (pass) {
- .decl => |decl_index| try w.print("decl__{d}_{d}", .{
- @intFromEnum(decl_index), @intFromEnum(ctype.index),
+ .nav => |nav| try w.print("nav__{d}_{d}", .{
+ @intFromEnum(nav), @intFromEnum(ctype.index),
}),
- .anon => |anon_decl| try w.print("anon__{d}_{d}", .{
- @intFromEnum(anon_decl), @intFromEnum(ctype.index),
+ .uav => |uav| try w.print("uav__{d}_{d}", .{
+ @intFromEnum(uav), @intFromEnum(ctype.index),
}),
.flush => try renderAlignedTypeName(w, ctype),
},
@@ -2370,15 +2372,15 @@ fn renderTypePrefix(
.fwd_decl => |fwd_decl_info| switch (fwd_decl_info.name) {
.anon => switch (pass) {
- .decl => |decl_index| try w.print("decl__{d}_{d}", .{
- @intFromEnum(decl_index), @intFromEnum(ctype.index),
+ .nav => |nav| try w.print("nav__{d}_{d}", .{
+ @intFromEnum(nav), @intFromEnum(ctype.index),
}),
- .anon => |anon_decl| try w.print("anon__{d}_{d}", .{
- @intFromEnum(anon_decl), @intFromEnum(ctype.index),
+ .uav => |uav| try w.print("uav__{d}_{d}", .{
+ @intFromEnum(uav), @intFromEnum(ctype.index),
}),
.flush => try renderFwdDeclTypeName(zcu, w, ctype, fwd_decl_info, ""),
},
- .owner_decl => try renderFwdDeclTypeName(zcu, w, ctype, fwd_decl_info, ""),
+ .index => try renderFwdDeclTypeName(zcu, w, ctype, fwd_decl_info, ""),
},
.aggregate => |aggregate_info| switch (aggregate_info.name) {
@@ -2557,7 +2559,7 @@ pub fn genTypeDecl(
try writer.writeAll(";\n");
}
switch (pass) {
- .decl, .anon => {
+ .nav, .uav => {
try writer.writeAll("typedef ");
_ = try renderTypePrefix(.flush, global_ctype_pool, zcu, writer, global_ctype, .suffix, .{});
try writer.writeByte(' ');
@@ -2569,7 +2571,7 @@ pub fn genTypeDecl(
},
.fwd_decl => |fwd_decl_info| switch (fwd_decl_info.name) {
.anon => switch (pass) {
- .decl, .anon => {
+ .nav, .uav => {
try writer.writeAll("typedef ");
_ = try renderTypePrefix(.flush, global_ctype_pool, zcu, writer, global_ctype, .suffix, .{});
try writer.writeByte(' ');
@@ -2578,13 +2580,14 @@ pub fn genTypeDecl(
},
.flush => {},
},
- .owner_decl => |owner_decl_index| if (!found_existing) {
+ .index => |index| if (!found_existing) {
+ const ip = &zcu.intern_pool;
+ const ty = Type.fromInterned(index);
_ = try renderTypePrefix(.flush, global_ctype_pool, zcu, writer, global_ctype, .suffix, .{});
try writer.writeByte(';');
- const owner_decl = zcu.declPtr(owner_decl_index);
- const owner_mod = zcu.namespacePtr(owner_decl.src_namespace).fileScope(zcu).mod;
- if (!owner_mod.strip) try writer.print(" /* {} */", .{
- owner_decl.fqn.fmt(&zcu.intern_pool),
+ const file_scope = ty.typeDeclInstAllowGeneratedTag(zcu).?.resolveFull(ip).file;
+ if (!zcu.fileByIndex(file_scope).mod.strip) try writer.print(" /* {} */", .{
+ ty.containerTypeName(ip).fmt(ip),
});
try writer.writeByte('\n');
},
@@ -2709,9 +2712,8 @@ pub fn genLazyFn(o: *Object, lazy_ctype_pool: *const CType.Pool, lazy_fn: LazyFn
const key = lazy_fn.key_ptr.*;
const val = lazy_fn.value_ptr;
switch (key) {
- .tag_name => {
- const enum_ty = val.data.tag_name;
-
+ .tag_name => |enum_ty_ip| {
+ const enum_ty = Type.fromInterned(enum_ty_ip);
const name_slice_ty = Type.slice_const_u8_sentinel_0;
try w.writeAll("static ");
@@ -2756,25 +2758,25 @@ pub fn genLazyFn(o: *Object, lazy_ctype_pool: *const CType.Pool, lazy_fn: LazyFn
_ = try airBreakpoint(w);
try w.writeAll("}\n");
},
- .never_tail, .never_inline => |fn_decl_index| {
- const fn_decl = zcu.declPtr(fn_decl_index);
- const fn_ctype = try o.dg.ctypeFromType(fn_decl.typeOf(zcu), .complete);
+ .never_tail, .never_inline => |fn_nav_index| {
+ const fn_val = zcu.navValue(fn_nav_index);
+ const fn_ctype = try o.dg.ctypeFromType(fn_val.typeOf(zcu), .complete);
const fn_info = fn_ctype.info(ctype_pool).function;
const fn_name = fmtCTypePoolString(val.fn_name, lazy_ctype_pool);
const fwd = o.dg.fwdDeclWriter();
try fwd.print("static zig_{s} ", .{@tagName(key)});
- try o.dg.renderFunctionSignature(fwd, fn_decl.val, fn_decl.alignment, .forward, .{
+ try o.dg.renderFunctionSignature(fwd, fn_val, ip.getNav(fn_nav_index).status.resolved.alignment, .forward, .{
.fmt_ctype_pool_string = fn_name,
});
try fwd.writeAll(";\n");
try w.print("zig_{s} ", .{@tagName(key)});
- try o.dg.renderFunctionSignature(w, fn_decl.val, .none, .complete, .{
+ try o.dg.renderFunctionSignature(w, fn_val, .none, .complete, .{
.fmt_ctype_pool_string = fn_name,
});
try w.writeAll(" {\n return ");
- try o.dg.renderDeclName(w, fn_decl_index);
+ try o.dg.renderNavName(w, fn_nav_index);
try w.writeByte('(');
for (0..fn_info.param_ctypes.len) |arg| {
if (arg > 0) try w.writeAll(", ");
@@ -2791,9 +2793,11 @@ pub fn genFunc(f: *Function) !void {
const o = &f.object;
const zcu = o.dg.pt.zcu;
+ const ip = &zcu.intern_pool;
const gpa = o.dg.gpa;
- const decl_index = o.dg.pass.decl;
- const decl = zcu.declPtr(decl_index);
+ const nav_index = o.dg.pass.nav;
+ const nav_val = zcu.navValue(nav_index);
+ const nav = ip.getNav(nav_index);
o.code_header = std.ArrayList(u8).init(gpa);
defer o.code_header.deinit();
@@ -2802,21 +2806,21 @@ pub fn genFunc(f: *Function) !void {
try fwd.writeAll("static ");
try o.dg.renderFunctionSignature(
fwd,
- decl.val,
- decl.alignment,
+ nav_val,
+ nav.status.resolved.alignment,
.forward,
- .{ .decl = decl_index },
+ .{ .nav = nav_index },
);
try fwd.writeAll(";\n");
- if (decl.@"linksection".toSlice(&zcu.intern_pool)) |s|
+ if (nav.status.resolved.@"linksection".toSlice(ip)) |s|
try o.writer().print("zig_linksection_fn({s}) ", .{fmtStringLiteral(s, null)});
try o.dg.renderFunctionSignature(
o.writer(),
- decl.val,
+ nav_val,
.none,
.complete,
- .{ .decl = decl_index },
+ .{ .nav = nav_index },
);
try o.writer().writeByte(' ');
@@ -2883,44 +2887,66 @@ pub fn genDecl(o: *Object) !void {
const pt = o.dg.pt;
const zcu = pt.zcu;
- const decl_index = o.dg.pass.decl;
- const decl = zcu.declPtr(decl_index);
- const decl_ty = decl.typeOf(zcu);
+ const ip = &zcu.intern_pool;
+ const nav = ip.getNav(o.dg.pass.nav);
+ const nav_ty = Type.fromInterned(nav.typeOf(ip));
+
+ if (!nav_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) return;
+ switch (ip.indexToKey(nav.status.resolved.val)) {
+ .@"extern" => |@"extern"| {
+ if (!ip.isFunctionType(nav_ty.toIntern())) return o.dg.renderFwdDecl(o.dg.pass.nav, .{
+ .is_extern = true,
+ .is_const = @"extern".is_const,
+ .is_threadlocal = @"extern".is_threadlocal,
+ .is_weak_linkage = @"extern".is_weak_linkage,
+ });
- if (!decl_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) return;
- if (decl.val.getExternFunc(zcu)) |_| {
- const fwd = o.dg.fwdDeclWriter();
- try fwd.writeAll("zig_extern ");
- try o.dg.renderFunctionSignature(
- fwd,
- decl.val,
- decl.alignment,
- .forward,
- .{ .@"export" = .{
- .main_name = decl.name,
- .extern_name = decl.name,
- } },
- );
- try fwd.writeAll(";\n");
- } else if (decl.val.getVariable(zcu)) |variable| {
- try o.dg.renderFwdDecl(decl_index, variable);
-
- if (variable.is_extern) return;
-
- const w = o.writer();
- if (variable.is_weak_linkage) try w.writeAll("zig_weak_linkage ");
- if (variable.is_threadlocal and !o.dg.mod.single_threaded) try w.writeAll("zig_threadlocal ");
- if (decl.@"linksection".toSlice(&zcu.intern_pool)) |s|
- try w.print("zig_linksection({s}) ", .{fmtStringLiteral(s, null)});
- const decl_c_value = .{ .decl = decl_index };
- try o.dg.renderTypeAndName(w, decl_ty, decl_c_value, .{}, decl.alignment, .complete);
- try w.writeAll(" = ");
- try o.dg.renderValue(w, Value.fromInterned(variable.init), .StaticInitializer);
- try w.writeByte(';');
- try o.indent_writer.insertNewline();
- } else {
- const decl_c_value = .{ .decl = decl_index };
- try genDeclValue(o, decl.val, decl_c_value, decl.alignment, decl.@"linksection");
+ const fwd = o.dg.fwdDeclWriter();
+ try fwd.writeAll("zig_extern ");
+ try o.dg.renderFunctionSignature(
+ fwd,
+ Value.fromInterned(nav.status.resolved.val),
+ nav.status.resolved.alignment,
+ .forward,
+ .{ .@"export" = .{
+ .main_name = nav.name,
+ .extern_name = nav.name,
+ } },
+ );
+ try fwd.writeAll(";\n");
+ },
+ .variable => |variable| {
+ try o.dg.renderFwdDecl(o.dg.pass.nav, .{
+ .is_extern = false,
+ .is_const = false,
+ .is_threadlocal = variable.is_threadlocal,
+ .is_weak_linkage = variable.is_weak_linkage,
+ });
+ const w = o.writer();
+ if (variable.is_weak_linkage) try w.writeAll("zig_weak_linkage ");
+ if (variable.is_threadlocal and !o.dg.mod.single_threaded) try w.writeAll("zig_threadlocal ");
+ if (nav.status.resolved.@"linksection".toSlice(&zcu.intern_pool)) |s|
+ try w.print("zig_linksection({s}) ", .{fmtStringLiteral(s, null)});
+ try o.dg.renderTypeAndName(
+ w,
+ nav_ty,
+ .{ .nav = o.dg.pass.nav },
+ .{},
+ nav.status.resolved.alignment,
+ .complete,
+ );
+ try w.writeAll(" = ");
+ try o.dg.renderValue(w, Value.fromInterned(variable.init), .StaticInitializer);
+ try w.writeByte(';');
+ try o.indent_writer.insertNewline();
+ },
+ else => try genDeclValue(
+ o,
+ Value.fromInterned(nav.status.resolved.val),
+ .{ .nav = o.dg.pass.nav },
+ nav.status.resolved.alignment,
+ nav.status.resolved.@"linksection",
+ ),
}
}
@@ -2956,31 +2982,34 @@ pub fn genExports(dg: *DeclGen, exported: Zcu.Exported, export_indices: []const
const main_name = zcu.all_exports.items[export_indices[0]].opts.name;
try fwd.writeAll("#define ");
switch (exported) {
- .decl_index => |decl_index| try dg.renderDeclName(fwd, decl_index),
- .value => |value| try DeclGen.renderAnonDeclName(fwd, Value.fromInterned(value)),
+ .nav => |nav| try dg.renderNavName(fwd, nav),
+ .uav => |uav| try DeclGen.renderUavName(fwd, Value.fromInterned(uav)),
}
try fwd.writeByte(' ');
try fwd.print("{ }", .{fmtIdent(main_name.toSlice(ip))});
try fwd.writeByte('\n');
- const is_const = switch (ip.indexToKey(exported.getValue(zcu).toIntern())) {
- .func, .extern_func => return for (export_indices) |export_index| {
- const @"export" = &zcu.all_exports.items[export_index];
- try fwd.writeAll("zig_extern ");
- if (@"export".opts.linkage == .weak) try fwd.writeAll("zig_weak_linkage_fn ");
- try dg.renderFunctionSignature(
- fwd,
- exported.getValue(zcu),
- exported.getAlign(zcu),
- .forward,
- .{ .@"export" = .{
- .main_name = main_name,
- .extern_name = @"export".opts.name,
- } },
- );
- try fwd.writeAll(";\n");
- },
- .variable => |variable| variable.is_const,
+ const exported_val = exported.getValue(zcu);
+ if (ip.isFunctionType(exported_val.typeOf(zcu).toIntern())) return for (export_indices) |export_index| {
+ const @"export" = &zcu.all_exports.items[export_index];
+ try fwd.writeAll("zig_extern ");
+ if (@"export".opts.linkage == .weak) try fwd.writeAll("zig_weak_linkage_fn ");
+ try dg.renderFunctionSignature(
+ fwd,
+ exported.getValue(zcu),
+ exported.getAlign(zcu),
+ .forward,
+ .{ .@"export" = .{
+ .main_name = main_name,
+ .extern_name = @"export".opts.name,
+ } },
+ );
+ try fwd.writeAll(";\n");
+ };
+ const is_const = switch (ip.indexToKey(exported_val.toIntern())) {
+ .func => unreachable,
+ .@"extern" => |@"extern"| @"extern".is_const,
+ .variable => false,
else => true,
};
for (export_indices) |export_index| {
@@ -4474,24 +4503,19 @@ fn airCall(
callee: {
known: {
- const fn_decl = fn_decl: {
- const callee_val = (try f.air.value(pl_op.operand, pt)) orelse break :known;
- break :fn_decl switch (zcu.intern_pool.indexToKey(callee_val.toIntern())) {
- .extern_func => |extern_func| extern_func.decl,
- .func => |func| func.owner_decl,
- .ptr => |ptr| if (ptr.byte_offset == 0) switch (ptr.base_addr) {
- .decl => |decl| decl,
- else => break :known,
- } else break :known,
+ const callee_val = (try f.air.value(pl_op.operand, pt)) orelse break :known;
+ const fn_nav = switch (zcu.intern_pool.indexToKey(callee_val.toIntern())) {
+ .@"extern" => |@"extern"| @"extern".owner_nav,
+ .func => |func| func.owner_nav,
+ .ptr => |ptr| if (ptr.byte_offset == 0) switch (ptr.base_addr) {
+ .nav => |nav| nav,
else => break :known,
- };
+ } else break :known,
+ else => break :known,
};
switch (modifier) {
- .auto, .always_tail => try f.object.dg.renderDeclName(writer, fn_decl),
- inline .never_tail, .never_inline => |m| try writer.writeAll(try f.getLazyFnName(
- @unionInit(LazyFnKey, @tagName(m), fn_decl),
- @unionInit(LazyFnValue.Data, @tagName(m), {}),
- )),
+ .auto, .always_tail => try f.object.dg.renderNavName(writer, fn_nav),
+ inline .never_tail, .never_inline => |m| try writer.writeAll(try f.getLazyFnName(@unionInit(LazyFnKey, @tagName(m), fn_nav))),
else => unreachable,
}
break :callee;
@@ -4554,11 +4578,12 @@ fn airDbgStmt(f: *Function, inst: Air.Inst.Index) !CValue {
fn airDbgInlineBlock(f: *Function, inst: Air.Inst.Index) !CValue {
const pt = f.object.dg.pt;
const zcu = pt.zcu;
+ const ip = &zcu.intern_pool;
const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = f.air.extraData(Air.DbgInlineBlock, ty_pl.payload);
- const owner_decl = zcu.funcOwnerDeclPtr(extra.data.func);
+ const owner_nav = ip.getNav(zcu.funcInfo(extra.data.func).owner_nav);
const writer = f.object.writer();
- try writer.print("/* inline:{} */\n", .{owner_decl.fqn.fmt(&zcu.intern_pool)});
+ try writer.print("/* inline:{} */\n", .{owner_nav.fqn.fmt(&zcu.intern_pool)});
return lowerBlock(f, inst, @ptrCast(f.air.extra[extra.end..][0..extra.data.body_len]));
}
@@ -5059,7 +5084,7 @@ fn asmInputNeedsLocal(f: *Function, constraint: []const u8, value: CValue) bool
else => switch (value) {
.constant => |val| switch (dg.pt.zcu.intern_pool.indexToKey(val.toIntern())) {
.ptr => |ptr| if (ptr.byte_offset == 0) switch (ptr.base_addr) {
- .decl => false,
+ .nav => false,
else => true,
} else true,
else => true,
@@ -6841,8 +6866,6 @@ fn airGetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airTagName(f: *Function, inst: Air.Inst.Index) !CValue {
- const pt = f.object.dg.pt;
- const zcu = pt.zcu;
const un_op = f.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const inst_ty = f.typeOfIndex(inst);
@@ -6854,7 +6877,7 @@ fn airTagName(f: *Function, inst: Air.Inst.Index) !CValue {
const local = try f.allocLocal(inst, inst_ty);
try f.writeCValue(writer, local, .Other);
try writer.print(" = {s}(", .{
- try f.getLazyFnName(.{ .tag_name = enum_ty.getOwnerDecl(zcu) }, .{ .tag_name = enum_ty }),
+ try f.getLazyFnName(.{ .tag_name = enum_ty.toIntern() }),
});
try f.writeCValue(writer, operand, .Other);
try writer.writeAll(");\n");
@@ -7390,18 +7413,17 @@ fn airCVaStart(f: *Function, inst: Air.Inst.Index) !CValue {
const pt = f.object.dg.pt;
const zcu = pt.zcu;
const inst_ty = f.typeOfIndex(inst);
- const decl_index = f.object.dg.pass.decl;
- const decl = zcu.declPtr(decl_index);
- const function_ctype = try f.ctypeFromType(decl.typeOf(zcu), .complete);
- const params_len = function_ctype.info(&f.object.dg.ctype_pool).function.param_ctypes.len;
+ const function_ty = zcu.navValue(f.object.dg.pass.nav).typeOf(zcu);
+ const function_info = (try f.ctypeFromType(function_ty, .complete)).info(&f.object.dg.ctype_pool).function;
+ assert(function_info.varargs);
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
try writer.writeAll("va_start(*(va_list *)&");
try f.writeCValue(writer, local, .Other);
- if (params_len > 0) {
+ if (function_info.param_ctypes.len > 0) {
try writer.writeAll(", ");
- try f.writeCValue(writer, .{ .arg = params_len - 1 }, .FunctionArgument);
+ try f.writeCValue(writer, .{ .arg = function_info.param_ctypes.len - 1 }, .FunctionArgument);
}
try writer.writeAll(");\n");
return local;
@@ -7941,7 +7963,7 @@ const Materialize = struct {
pub fn start(f: *Function, inst: Air.Inst.Index, ty: Type, value: CValue) !Materialize {
return .{ .local = switch (value) {
- .local_ref, .constant, .decl_ref, .undef => try f.moveCValue(inst, ty, value),
+ .local_ref, .constant, .nav_ref, .undef => try f.moveCValue(inst, ty, value),
.new_local => |local| .{ .local = local },
else => value,
} };
diff --git a/src/codegen/c/Type.zig b/src/codegen/c/Type.zig
index ecd1b8c2f7..943f54ae96 100644
--- a/src/codegen/c/Type.zig
+++ b/src/codegen/c/Type.zig
@@ -449,18 +449,18 @@ pub fn info(ctype: CType, pool: *const Pool) Info {
},
.fwd_decl_struct => return .{ .fwd_decl = .{
.tag = .@"struct",
- .name = .{ .owner_decl = @enumFromInt(item.data) },
+ .name = .{ .index = @enumFromInt(item.data) },
} },
.fwd_decl_union => return .{ .fwd_decl = .{
.tag = .@"union",
- .name = .{ .owner_decl = @enumFromInt(item.data) },
+ .name = .{ .index = @enumFromInt(item.data) },
} },
.aggregate_struct_anon => {
const extra_trail = pool.getExtraTrail(Pool.AggregateAnon, item.data);
return .{ .aggregate = .{
.tag = .@"struct",
.name = .{ .anon = .{
- .owner_decl = extra_trail.extra.owner_decl,
+ .index = extra_trail.extra.index,
.id = extra_trail.extra.id,
} },
.fields = .{
@@ -474,7 +474,7 @@ pub fn info(ctype: CType, pool: *const Pool) Info {
return .{ .aggregate = .{
.tag = .@"union",
.name = .{ .anon = .{
- .owner_decl = extra_trail.extra.owner_decl,
+ .index = extra_trail.extra.index,
.id = extra_trail.extra.id,
} },
.fields = .{
@@ -489,7 +489,7 @@ pub fn info(ctype: CType, pool: *const Pool) Info {
.tag = .@"struct",
.@"packed" = true,
.name = .{ .anon = .{
- .owner_decl = extra_trail.extra.owner_decl,
+ .index = extra_trail.extra.index,
.id = extra_trail.extra.id,
} },
.fields = .{
@@ -504,7 +504,7 @@ pub fn info(ctype: CType, pool: *const Pool) Info {
.tag = .@"union",
.@"packed" = true,
.name = .{ .anon = .{
- .owner_decl = extra_trail.extra.owner_decl,
+ .index = extra_trail.extra.index,
.id = extra_trail.extra.id,
} },
.fields = .{
@@ -834,7 +834,7 @@ pub const Info = union(enum) {
tag: AggregateTag,
name: union(enum) {
anon: Field.Slice,
- owner_decl: DeclIndex,
+ index: InternPool.Index,
},
};
@@ -843,7 +843,7 @@ pub const Info = union(enum) {
@"packed": bool = false,
name: union(enum) {
anon: struct {
- owner_decl: DeclIndex,
+ index: InternPool.Index,
id: u32,
},
fwd_decl: CType,
@@ -885,14 +885,14 @@ pub const Info = union(enum) {
rhs_pool,
pool_adapter,
),
- .owner_decl => |lhs_owner_decl| rhs_info.fwd_decl.name == .owner_decl and
- lhs_owner_decl == rhs_info.fwd_decl.name.owner_decl,
+ .index => |lhs_index| rhs_info.fwd_decl.name == .index and
+ lhs_index == rhs_info.fwd_decl.name.index,
},
.aggregate => |lhs_aggregate_info| lhs_aggregate_info.tag == rhs_info.aggregate.tag and
lhs_aggregate_info.@"packed" == rhs_info.aggregate.@"packed" and
switch (lhs_aggregate_info.name) {
.anon => |lhs_anon| rhs_info.aggregate.name == .anon and
- lhs_anon.owner_decl == rhs_info.aggregate.name.anon.owner_decl and
+ lhs_anon.index == rhs_info.aggregate.name.anon.index and
lhs_anon.id == rhs_info.aggregate.name.anon.id,
.fwd_decl => |lhs_fwd_decl| rhs_info.aggregate.name == .fwd_decl and
pool_adapter.eql(lhs_fwd_decl, rhs_info.aggregate.name.fwd_decl),
@@ -1105,7 +1105,7 @@ pub const Pool = struct {
tag: Info.AggregateTag,
name: union(enum) {
anon: []const Info.Field,
- owner_decl: DeclIndex,
+ index: InternPool.Index,
},
},
) !CType {
@@ -1145,13 +1145,13 @@ pub const Pool = struct {
.@"enum" => unreachable,
}, extra_index);
},
- .owner_decl => |owner_decl| {
- hasher.update(owner_decl);
+ .index => |index| {
+ hasher.update(index);
return pool.tagData(allocator, hasher, switch (fwd_decl_info.tag) {
.@"struct" => .fwd_decl_struct,
.@"union" => .fwd_decl_union,
.@"enum" => unreachable,
- }, @intFromEnum(owner_decl));
+ }, @intFromEnum(index));
},
}
}
@@ -1164,7 +1164,7 @@ pub const Pool = struct {
@"packed": bool = false,
name: union(enum) {
anon: struct {
- owner_decl: DeclIndex,
+ index: InternPool.Index,
id: u32,
},
fwd_decl: CType,
@@ -1176,7 +1176,7 @@ pub const Pool = struct {
switch (aggregate_info.name) {
.anon => |anon| {
const extra: AggregateAnon = .{
- .owner_decl = anon.owner_decl,
+ .index = anon.index,
.id = anon.id,
.fields_len = @intCast(aggregate_info.fields.len),
};
@@ -1683,7 +1683,7 @@ pub const Pool = struct {
.auto, .@"extern" => {
const fwd_decl = try pool.getFwdDecl(allocator, .{
.tag = .@"struct",
- .name = .{ .owner_decl = loaded_struct.decl.unwrap().? },
+ .name = .{ .index = ip_index },
});
if (kind.isForward()) return if (ty.hasRuntimeBitsIgnoreComptime(pt))
fwd_decl
@@ -1822,7 +1822,7 @@ pub const Pool = struct {
const has_tag = loaded_union.hasTag(ip);
const fwd_decl = try pool.getFwdDecl(allocator, .{
.tag = if (has_tag) .@"struct" else .@"union",
- .name = .{ .owner_decl = loaded_union.decl },
+ .name = .{ .index = ip_index },
});
if (kind.isForward()) return if (ty.hasRuntimeBitsIgnoreComptime(pt))
fwd_decl
@@ -1837,7 +1837,7 @@ pub const Pool = struct {
);
var hasher = Hasher.init;
var tag: Pool.Tag = .aggregate_union;
- var payload_align: Alignment = .@"1";
+ var payload_align: InternPool.Alignment = .@"1";
for (0..loaded_union.field_types.len) |field_index| {
const field_type = Type.fromInterned(
loaded_union.field_types.get(ip)[field_index],
@@ -1915,7 +1915,7 @@ pub const Pool = struct {
&hasher,
AggregateAnon,
.{
- .owner_decl = loaded_union.decl,
+ .index = ip_index,
.id = 0,
.fields_len = fields_len,
},
@@ -2017,7 +2017,7 @@ pub const Pool = struct {
.undef,
.simple_value,
.variable,
- .extern_func,
+ .@"extern",
.func,
.int,
.err,
@@ -2032,7 +2032,7 @@ pub const Pool = struct {
.aggregate,
.un,
.memoized_call,
- => unreachable,
+ => unreachable, // values, not types
},
}
}
@@ -2123,9 +2123,9 @@ pub const Pool = struct {
});
}
},
- .owner_decl => |owner_decl| pool.items.appendAssumeCapacity(.{
+ .index => |index| pool.items.appendAssumeCapacity(.{
.tag = tag,
- .data = @intFromEnum(owner_decl),
+ .data = @intFromEnum(index),
}),
},
.aggregate => |aggregate_info| {
@@ -2133,7 +2133,7 @@ pub const Pool = struct {
.tag = tag,
.data = switch (aggregate_info.name) {
.anon => |anon| try pool.addExtra(allocator, AggregateAnon, .{
- .owner_decl = anon.owner_decl,
+ .index = anon.index,
.id = anon.id,
.fields_len = aggregate_info.fields.len,
}, aggregate_info.fields.len * @typeInfo(Field).Struct.fields.len),
@@ -2221,7 +2221,7 @@ pub const Pool = struct {
Pool.Tag => @compileError("pass tag to final"),
CType, CType.Index => @compileError("hash ctype.hash(pool) instead"),
String, String.Index => @compileError("hash string.slice(pool) instead"),
- u32, DeclIndex, Aligned.Flags => hasher.impl.update(std.mem.asBytes(&data)),
+ u32, InternPool.Index, Aligned.Flags => hasher.impl.update(std.mem.asBytes(&data)),
[]const u8 => hasher.impl.update(data),
else => @compileError("unhandled type: " ++ @typeName(@TypeOf(data))),
}
@@ -2426,7 +2426,7 @@ pub const Pool = struct {
};
const AggregateAnon = struct {
- owner_decl: DeclIndex,
+ index: InternPool.Index,
id: u32,
fields_len: u32,
};
@@ -2467,7 +2467,7 @@ pub const Pool = struct {
const value = @field(extra, field.name);
array.appendAssumeCapacity(switch (field.type) {
u32 => value,
- CType.Index, String.Index, DeclIndex => @intFromEnum(value),
+ CType.Index, String.Index, InternPool.Index => @intFromEnum(value),
Aligned.Flags => @bitCast(value),
else => @compileError("bad field type: " ++ field.name ++ ": " ++
@typeName(field.type)),
@@ -2530,7 +2530,7 @@ pub const Pool = struct {
inline for (fields, pool.extra.items[extra_index..][0..fields.len]) |field, value|
@field(extra, field.name) = switch (field.type) {
u32 => value,
- CType.Index, String.Index, DeclIndex => @enumFromInt(value),
+ CType.Index, String.Index, InternPool.Index => @enumFromInt(value),
Aligned.Flags => @bitCast(value),
else => @compileError("bad field type: " ++ field.name ++ ": " ++ @typeName(field.type)),
};
@@ -2546,8 +2546,8 @@ pub const Pool = struct {
};
pub const AlignAs = packed struct {
- @"align": Alignment,
- abi: Alignment,
+ @"align": InternPool.Alignment,
+ abi: InternPool.Alignment,
pub fn fromAlignment(alignas: AlignAs) AlignAs {
assert(alignas.abi != .none);
@@ -2556,14 +2556,14 @@ pub const AlignAs = packed struct {
.abi = alignas.abi,
};
}
- pub fn fromAbiAlignment(abi: Alignment) AlignAs {
+ pub fn fromAbiAlignment(abi: InternPool.Alignment) AlignAs {
assert(abi != .none);
return .{ .@"align" = abi, .abi = abi };
}
pub fn fromByteUnits(@"align": u64, abi: u64) AlignAs {
return fromAlignment(.{
- .@"align" = Alignment.fromByteUnits(@"align"),
- .abi = Alignment.fromNonzeroByteUnits(abi),
+ .@"align" = InternPool.Alignment.fromByteUnits(@"align"),
+ .abi = InternPool.Alignment.fromNonzeroByteUnits(abi),
});
}
@@ -2578,11 +2578,10 @@ pub const AlignAs = packed struct {
}
};
-const Alignment = @import("../../InternPool.zig").Alignment;
const assert = std.debug.assert;
const CType = @This();
+const InternPool = @import("../../InternPool.zig");
const Module = @import("../../Package/Module.zig");
const std = @import("std");
const Type = @import("../../Type.zig");
const Zcu = @import("../../Zcu.zig");
-const DeclIndex = @import("../../InternPool.zig").DeclIndex;
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 8b13b1f205..89a24152fc 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -776,7 +776,7 @@ pub const Object = struct {
debug_enums: std.ArrayListUnmanaged(Builder.Metadata),
debug_globals: std.ArrayListUnmanaged(Builder.Metadata),
- debug_file_map: std.AutoHashMapUnmanaged(*const Zcu.File, Builder.Metadata),
+ debug_file_map: std.AutoHashMapUnmanaged(Zcu.File.Index, Builder.Metadata),
debug_type_map: std.AutoHashMapUnmanaged(Type, Builder.Metadata),
debug_unresolved_namespace_scopes: std.AutoArrayHashMapUnmanaged(InternPool.NamespaceIndex, Builder.Metadata),
@@ -790,11 +790,13 @@ pub const Object = struct {
/// version of the name and incorrectly get function not found in the llvm module.
/// * it works for functions not all globals.
/// Therefore, this table keeps track of the mapping.
- decl_map: std.AutoHashMapUnmanaged(InternPool.DeclIndex, Builder.Global.Index),
+ nav_map: std.AutoHashMapUnmanaged(InternPool.Nav.Index, Builder.Global.Index),
/// Same deal as `decl_map` but for anonymous declarations, which are always global constants.
- anon_decl_map: std.AutoHashMapUnmanaged(InternPool.Index, Builder.Global.Index),
- /// Serves the same purpose as `decl_map` but only used for the `is_named_enum_value` instruction.
- named_enum_map: std.AutoHashMapUnmanaged(InternPool.DeclIndex, Builder.Function.Index),
+ uav_map: std.AutoHashMapUnmanaged(InternPool.Index, Builder.Global.Index),
+ /// Maps enum types to their corresponding LLVM functions for implementing the `tag_name` instruction.
+ enum_tag_name_map: std.AutoHashMapUnmanaged(InternPool.Index, Builder.Global.Index),
+ /// Serves the same purpose as `enum_tag_name_map` but for the `is_named_enum_value` instruction.
+ named_enum_map: std.AutoHashMapUnmanaged(InternPool.Index, Builder.Function.Index),
/// Maps Zig types to LLVM types. The table memory is backed by the GPA of
/// the compiler.
/// TODO when InternPool garbage collection is implemented, this map needs
@@ -963,8 +965,9 @@ pub const Object = struct {
.debug_type_map = .{},
.debug_unresolved_namespace_scopes = .{},
.target = target,
- .decl_map = .{},
- .anon_decl_map = .{},
+ .nav_map = .{},
+ .uav_map = .{},
+ .enum_tag_name_map = .{},
.named_enum_map = .{},
.type_map = .{},
.error_name_table = .none,
@@ -981,8 +984,9 @@ pub const Object = struct {
self.debug_file_map.deinit(gpa);
self.debug_type_map.deinit(gpa);
self.debug_unresolved_namespace_scopes.deinit(gpa);
- self.decl_map.deinit(gpa);
- self.anon_decl_map.deinit(gpa);
+ self.nav_map.deinit(gpa);
+ self.uav_map.deinit(gpa);
+ self.enum_tag_name_map.deinit(gpa);
self.named_enum_map.deinit(gpa);
self.type_map.deinit(gpa);
self.builder.deinit();
@@ -1108,7 +1112,7 @@ pub const Object = struct {
const fwd_ref = self.debug_unresolved_namespace_scopes.values()[i];
const namespace = zcu.namespacePtr(namespace_index);
- const debug_type = try self.lowerDebugType(namespace.getType(zcu));
+ const debug_type = try self.lowerDebugType(Type.fromInterned(namespace.owner_type));
self.builder.debugForwardReferenceSetType(fwd_ref, debug_type);
}
@@ -1328,24 +1332,22 @@ pub const Object = struct {
assert(std.meta.eql(pt, o.pt));
const zcu = pt.zcu;
const comp = zcu.comp;
+ const ip = &zcu.intern_pool;
const func = zcu.funcInfo(func_index);
- const decl_index = func.owner_decl;
- const decl = zcu.declPtr(decl_index);
- const namespace = zcu.namespacePtr(decl.src_namespace);
- const file_scope = namespace.fileScope(zcu);
- const owner_mod = file_scope.mod;
- const fn_info = zcu.typeToFunc(decl.typeOf(zcu)).?;
+ const nav = ip.getNav(func.owner_nav);
+ const file_scope = zcu.navFileScopeIndex(func.owner_nav);
+ const owner_mod = zcu.fileByIndex(file_scope).mod;
+ const fn_ty = Type.fromInterned(func.ty);
+ const fn_info = zcu.typeToFunc(fn_ty).?;
const target = owner_mod.resolved_target.result;
- const ip = &zcu.intern_pool;
- var dg: DeclGen = .{
+ var ng: NavGen = .{
.object = o,
- .decl_index = decl_index,
- .decl = decl,
+ .nav_index = func.owner_nav,
.err_msg = null,
};
- const function_index = try o.resolveLlvmFunction(decl_index);
+ const function_index = try o.resolveLlvmFunction(func.owner_nav);
var attributes = try function_index.ptrConst(&o.builder).attributes.toWip(&o.builder);
defer attributes.deinit(&o.builder);
@@ -1409,7 +1411,7 @@ pub const Object = struct {
} }, &o.builder);
}
- if (decl.@"linksection".toSlice(ip)) |section|
+ if (nav.status.resolved.@"linksection".toSlice(ip)) |section|
function_index.setSection(try o.builder.string(section), &o.builder);
var deinit_wip = true;
@@ -1422,7 +1424,7 @@ pub const Object = struct {
var llvm_arg_i: u32 = 0;
- // This gets the LLVM values from the function and stores them in `dg.args`.
+ // This gets the LLVM values from the function and stores them in `ng.args`.
const sret = firstParamSRet(fn_info, pt, target);
const ret_ptr: Builder.Value = if (sret) param: {
const param = wip.arg(llvm_arg_i);
@@ -1622,13 +1624,13 @@ pub const Object = struct {
const file, const subprogram = if (!wip.strip) debug_info: {
const file = try o.getDebugFile(file_scope);
- const line_number = decl.navSrcLine(zcu) + 1;
- const is_internal_linkage = decl.val.getExternFunc(zcu) == null;
- const debug_decl_type = try o.lowerDebugType(decl.typeOf(zcu));
+ const line_number = zcu.navSrcLine(func.owner_nav) + 1;
+ const is_internal_linkage = ip.indexToKey(nav.status.resolved.val) != .@"extern";
+ const debug_decl_type = try o.lowerDebugType(fn_ty);
const subprogram = try o.builder.debugSubprogram(
file,
- try o.builder.metadataString(decl.name.toSlice(ip)),
+ try o.builder.metadataString(nav.name.toSlice(ip)),
try o.builder.metadataStringFromStrtabString(function_index.name(&o.builder)),
line_number,
line_number + func.lbrace_line,
@@ -1654,7 +1656,7 @@ pub const Object = struct {
.gpa = gpa,
.air = air,
.liveness = liveness,
- .dg = &dg,
+ .ng = &ng,
.wip = wip,
.is_naked = fn_info.cc == .Naked,
.ret_ptr = ret_ptr,
@@ -1665,7 +1667,7 @@ pub const Object = struct {
.sync_scope = if (owner_mod.single_threaded) .singlethread else .system,
.file = file,
.scope = subprogram,
- .base_line = dg.decl.navSrcLine(zcu),
+ .base_line = zcu.navSrcLine(func.owner_nav),
.prev_dbg_line = 0,
.prev_dbg_column = 0,
.err_ret_trace = err_ret_trace,
@@ -1675,9 +1677,8 @@ pub const Object = struct {
fg.genBody(air.getMainBody()) catch |err| switch (err) {
error.CodegenFail => {
- decl.analysis = .codegen_failure;
- try zcu.failed_analysis.put(zcu.gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index }), dg.err_msg.?);
- dg.err_msg = null;
+ try zcu.failed_codegen.put(zcu.gpa, func.owner_nav, ng.err_msg.?);
+ ng.err_msg = null;
return;
},
else => |e| return e,
@@ -1686,20 +1687,17 @@ pub const Object = struct {
try fg.wip.finish();
}
- pub fn updateDecl(self: *Object, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !void {
+ pub fn updateNav(self: *Object, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) !void {
assert(std.meta.eql(pt, self.pt));
- const decl = pt.zcu.declPtr(decl_index);
- var dg: DeclGen = .{
+ var ng: NavGen = .{
.object = self,
- .decl = decl,
- .decl_index = decl_index,
+ .nav_index = nav_index,
.err_msg = null,
};
- dg.genDecl() catch |err| switch (err) {
+ ng.genDecl() catch |err| switch (err) {
error.CodegenFail => {
- decl.analysis = .codegen_failure;
- try pt.zcu.failed_analysis.put(pt.zcu.gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index }), dg.err_msg.?);
- dg.err_msg = null;
+ try pt.zcu.failed_codegen.put(pt.zcu.gpa, nav_index, ng.err_msg.?);
+ ng.err_msg = null;
return;
},
else => |e| return e,
@@ -1714,19 +1712,18 @@ pub const Object = struct {
) link.File.UpdateExportsError!void {
assert(std.meta.eql(pt, self.pt));
const zcu = pt.zcu;
- const decl_index = switch (exported) {
- .decl_index => |i| i,
- .value => |val| return updateExportedValue(self, zcu, val, export_indices),
+ const nav_index = switch (exported) {
+ .nav => |nav| nav,
+ .uav => |uav| return updateExportedValue(self, zcu, uav, export_indices),
};
const ip = &zcu.intern_pool;
- const global_index = self.decl_map.get(decl_index).?;
- const decl = zcu.declPtr(decl_index);
+ const global_index = self.nav_map.get(nav_index).?;
const comp = zcu.comp;
if (export_indices.len != 0) {
return updateExportedGlobal(self, zcu, global_index, export_indices);
} else {
- const fqn = try self.builder.strtabString(decl.fqn.toSlice(ip));
+ const fqn = try self.builder.strtabString(ip.getNav(nav_index).fqn.toSlice(ip));
try global_index.rename(fqn, &self.builder);
global_index.setLinkage(.internal, &self.builder);
if (comp.config.dll_export_fns)
@@ -1745,7 +1742,7 @@ pub const Object = struct {
const ip = &mod.intern_pool;
const main_exp_name = try o.builder.strtabString(mod.all_exports.items[export_indices[0]].opts.name.toSlice(ip));
const global_index = i: {
- const gop = try o.anon_decl_map.getOrPut(gpa, exported_value);
+ const gop = try o.uav_map.getOrPut(gpa, exported_value);
if (gop.found_existing) {
const global_index = gop.value_ptr.*;
try global_index.rename(main_exp_name, &o.builder);
@@ -1868,11 +1865,12 @@ pub const Object = struct {
global.delete(&self.builder);
}
- fn getDebugFile(o: *Object, file: *const Zcu.File) Allocator.Error!Builder.Metadata {
+ fn getDebugFile(o: *Object, file_index: Zcu.File.Index) Allocator.Error!Builder.Metadata {
const gpa = o.gpa;
- const gop = try o.debug_file_map.getOrPut(gpa, file);
- errdefer assert(o.debug_file_map.remove(file));
+ const gop = try o.debug_file_map.getOrPut(gpa, file_index);
+ errdefer assert(o.debug_file_map.remove(file_index));
if (gop.found_existing) return gop.value_ptr.*;
+ const file = o.pt.zcu.fileByIndex(file_index);
gop.value_ptr.* = try o.builder.debugFile(
try o.builder.metadataString(std.fs.path.basename(file.sub_file_path)),
dir_path: {
@@ -1930,17 +1928,13 @@ pub const Object = struct {
return debug_int_type;
},
.Enum => {
- const owner_decl_index = ty.getOwnerDecl(zcu);
- const owner_decl = zcu.declPtr(owner_decl_index);
-
if (!ty.hasRuntimeBitsIgnoreComptime(pt)) {
- const debug_enum_type = try o.makeEmptyNamespaceDebugType(owner_decl_index);
+ const debug_enum_type = try o.makeEmptyNamespaceDebugType(ty);
try o.debug_type_map.put(gpa, ty, debug_enum_type);
return debug_enum_type;
}
const enum_type = ip.loadEnumType(ty.toIntern());
-
const enumerators = try gpa.alloc(Builder.Metadata, enum_type.names.len);
defer gpa.free(enumerators);
@@ -1963,9 +1957,11 @@ pub const Object = struct {
);
}
- const file_scope = zcu.namespacePtr(owner_decl.src_namespace).fileScope(zcu);
- const file = try o.getDebugFile(file_scope);
- const scope = try o.namespaceToDebugScope(owner_decl.src_namespace);
+ const file = try o.getDebugFile(ty.typeDeclInstAllowGeneratedTag(zcu).?.resolveFull(ip).file);
+ const scope = if (ty.getParentNamespace(zcu).?.unwrap()) |parent_namespace|
+ try o.namespaceToDebugScope(parent_namespace)
+ else
+ file;
const name = try o.allocTypeName(ty);
defer gpa.free(name);
@@ -1974,7 +1970,7 @@ pub const Object = struct {
try o.builder.metadataString(name),
file,
scope,
- owner_decl.typeSrcLine(zcu) + 1, // Line
+ ty.typeDeclSrcLine(zcu).? + 1, // Line
try o.lowerDebugType(int_ty),
ty.abiSize(pt) * 8,
(ty.abiAlignment(pt).toByteUnits() orelse 0) * 8,
@@ -2138,14 +2134,18 @@ pub const Object = struct {
const name = try o.allocTypeName(ty);
defer gpa.free(name);
- const owner_decl_index = ty.getOwnerDecl(zcu);
- const owner_decl = zcu.declPtr(owner_decl_index);
- const file_scope = zcu.namespacePtr(owner_decl.src_namespace).fileScope(zcu);
+
+ const file = try o.getDebugFile(ty.typeDeclInstAllowGeneratedTag(zcu).?.resolveFull(ip).file);
+ const scope = if (ty.getParentNamespace(zcu).?.unwrap()) |parent_namespace|
+ try o.namespaceToDebugScope(parent_namespace)
+ else
+ file;
+
const debug_opaque_type = try o.builder.debugStructType(
try o.builder.metadataString(name),
- try o.getDebugFile(file_scope),
- try o.namespaceToDebugScope(owner_decl.src_namespace),
- owner_decl.typeSrcLine(zcu) + 1, // Line
+ file,
+ scope,
+ ty.typeDeclSrcLine(zcu).? + 1, // Line
.none, // Underlying type
0, // Size
0, // Align
@@ -2460,8 +2460,7 @@ pub const Object = struct {
// into. Therefore we can satisfy this by making an empty namespace,
// rather than changing the frontend to unnecessarily resolve the
// struct field types.
- const owner_decl_index = ty.getOwnerDecl(zcu);
- const debug_struct_type = try o.makeEmptyNamespaceDebugType(owner_decl_index);
+ const debug_struct_type = try o.makeEmptyNamespaceDebugType(ty);
try o.debug_type_map.put(gpa, ty, debug_struct_type);
return debug_struct_type;
}
@@ -2470,8 +2469,7 @@ pub const Object = struct {
}
if (!ty.hasRuntimeBitsIgnoreComptime(pt)) {
- const owner_decl_index = ty.getOwnerDecl(zcu);
- const debug_struct_type = try o.makeEmptyNamespaceDebugType(owner_decl_index);
+ const debug_struct_type = try o.makeEmptyNamespaceDebugType(ty);
try o.debug_type_map.put(gpa, ty, debug_struct_type);
return debug_struct_type;
}
@@ -2536,8 +2534,6 @@ pub const Object = struct {
return debug_struct_type;
},
.Union => {
- const owner_decl_index = ty.getOwnerDecl(zcu);
-
const name = try o.allocTypeName(ty);
defer gpa.free(name);
@@ -2546,7 +2542,7 @@ pub const Object = struct {
!ty.hasRuntimeBitsIgnoreComptime(pt) or
!union_type.haveLayout(ip))
{
- const debug_union_type = try o.makeEmptyNamespaceDebugType(owner_decl_index);
+ const debug_union_type = try o.makeEmptyNamespaceDebugType(ty);
try o.debug_type_map.put(gpa, ty, debug_union_type);
return debug_union_type;
}
@@ -2762,8 +2758,7 @@ pub const Object = struct {
fn namespaceToDebugScope(o: *Object, namespace_index: InternPool.NamespaceIndex) !Builder.Metadata {
const zcu = o.pt.zcu;
const namespace = zcu.namespacePtr(namespace_index);
- const file_scope = namespace.fileScope(zcu);
- if (namespace.parent == .none) return try o.getDebugFile(file_scope);
+ if (namespace.parent == .none) return try o.getDebugFile(namespace.file_scope);
const gop = try o.debug_unresolved_namespace_scopes.getOrPut(o.gpa, namespace_index);
@@ -2772,15 +2767,19 @@ pub const Object = struct {
return gop.value_ptr.*;
}
- fn makeEmptyNamespaceDebugType(o: *Object, decl_index: InternPool.DeclIndex) !Builder.Metadata {
+ fn makeEmptyNamespaceDebugType(o: *Object, ty: Type) !Builder.Metadata {
const zcu = o.pt.zcu;
- const decl = zcu.declPtr(decl_index);
- const file_scope = zcu.namespacePtr(decl.src_namespace).fileScope(zcu);
+ const ip = &zcu.intern_pool;
+ const file = try o.getDebugFile(ty.typeDeclInstAllowGeneratedTag(zcu).?.resolveFull(ip).file);
+ const scope = if (ty.getParentNamespace(zcu).?.unwrap()) |parent_namespace|
+ try o.namespaceToDebugScope(parent_namespace)
+ else
+ file;
return o.builder.debugStructType(
- try o.builder.metadataString(decl.name.toSlice(&zcu.intern_pool)), // TODO use fully qualified name
- try o.getDebugFile(file_scope),
- try o.namespaceToDebugScope(decl.src_namespace),
- decl.typeSrcLine(zcu) + 1,
+ try o.builder.metadataString(ty.containerTypeName(ip).toSlice(ip)), // TODO use fully qualified name
+ file,
+ scope,
+ ty.typeDeclSrcLine(zcu).? + 1,
.none,
0,
0,
@@ -2791,25 +2790,24 @@ pub const Object = struct {
fn getStackTraceType(o: *Object) Allocator.Error!Type {
const pt = o.pt;
const zcu = pt.zcu;
+ const ip = &zcu.intern_pool;
const std_mod = zcu.std_mod;
const std_file_imported = pt.importPkg(std_mod) catch unreachable;
- const builtin_str = try zcu.intern_pool.getOrPutString(zcu.gpa, pt.tid, "builtin", .no_embedded_nulls);
- const std_file_root_decl = zcu.fileRootDecl(std_file_imported.file_index);
- const std_namespace = zcu.namespacePtr(zcu.declPtr(std_file_root_decl.unwrap().?).src_namespace);
- const builtin_decl = std_namespace.decls.getKeyAdapted(builtin_str, Zcu.DeclAdapter{ .zcu = zcu }).?;
+ const builtin_str = try ip.getOrPutString(zcu.gpa, pt.tid, "builtin", .no_embedded_nulls);
+ const std_file_root_type = Type.fromInterned(zcu.fileRootType(std_file_imported.file_index));
+ const std_namespace = ip.namespacePtr(std_file_root_type.getNamespaceIndex(zcu).unwrap().?);
+ const builtin_nav = std_namespace.pub_decls.getKeyAdapted(builtin_str, Zcu.Namespace.NameAdapter{ .zcu = zcu }).?;
- const stack_trace_str = try zcu.intern_pool.getOrPutString(zcu.gpa, pt.tid, "StackTrace", .no_embedded_nulls);
+ const stack_trace_str = try ip.getOrPutString(zcu.gpa, pt.tid, "StackTrace", .no_embedded_nulls);
// buffer is only used for int_type, `builtin` is a struct.
- const builtin_ty = zcu.declPtr(builtin_decl).val.toType();
+ const builtin_ty = zcu.navValue(builtin_nav).toType();
const builtin_namespace = zcu.namespacePtrUnwrap(builtin_ty.getNamespaceIndex(zcu)).?;
- const stack_trace_decl_index = builtin_namespace.decls.getKeyAdapted(stack_trace_str, Zcu.DeclAdapter{ .zcu = zcu }).?;
- const stack_trace_decl = zcu.declPtr(stack_trace_decl_index);
+ const stack_trace_nav = builtin_namespace.pub_decls.getKeyAdapted(stack_trace_str, Zcu.Namespace.NameAdapter{ .zcu = zcu }).?;
// Sema should have ensured that StackTrace was analyzed.
- assert(stack_trace_decl.has_tv);
- return stack_trace_decl.val.toType();
+ return zcu.navValue(stack_trace_nav).toType();
}
fn allocTypeName(o: *Object, ty: Type) Allocator.Error![:0]const u8 {
@@ -2824,29 +2822,33 @@ pub const Object = struct {
/// completed, so if any attributes rely on that, they must be done in updateFunc, not here.
fn resolveLlvmFunction(
o: *Object,
- decl_index: InternPool.DeclIndex,
+ nav_index: InternPool.Nav.Index,
) Allocator.Error!Builder.Function.Index {
const pt = o.pt;
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const gpa = o.gpa;
- const decl = zcu.declPtr(decl_index);
- const namespace = zcu.namespacePtr(decl.src_namespace);
- const owner_mod = namespace.fileScope(zcu).mod;
- const zig_fn_type = decl.typeOf(zcu);
- const gop = try o.decl_map.getOrPut(gpa, decl_index);
+ const nav = ip.getNav(nav_index);
+ const owner_mod = zcu.navFileScope(nav_index).mod;
+ const resolved = nav.status.resolved;
+ const val = Value.fromInterned(resolved.val);
+ const ty = val.typeOf(zcu);
+ const gop = try o.nav_map.getOrPut(gpa, nav_index);
if (gop.found_existing) return gop.value_ptr.ptr(&o.builder).kind.function;
- assert(decl.has_tv);
- const fn_info = zcu.typeToFunc(zig_fn_type).?;
+ const fn_info = zcu.typeToFunc(ty).?;
const target = owner_mod.resolved_target.result;
const sret = firstParamSRet(fn_info, pt, target);
- const is_extern = decl.isExtern(zcu);
+ const is_extern, const lib_name = switch (ip.indexToKey(val.toIntern())) {
+ .variable => |variable| .{ false, variable.lib_name },
+ .@"extern" => |@"extern"| .{ true, @"extern".lib_name },
+ else => .{ false, .none },
+ };
const function_index = try o.builder.addFunction(
- try o.lowerType(zig_fn_type),
- try o.builder.strtabString((if (is_extern) decl.name else decl.fqn).toSlice(ip)),
- toLlvmAddressSpace(decl.@"addrspace", target),
+ try o.lowerType(ty),
+ try o.builder.strtabString((if (is_extern) nav.name else nav.fqn).toSlice(ip)),
+ toLlvmAddressSpace(resolved.@"addrspace", target),
);
gop.value_ptr.* = function_index.ptrConst(&o.builder).global;
@@ -2860,12 +2862,12 @@ pub const Object = struct {
if (target.isWasm()) {
try attributes.addFnAttr(.{ .string = .{
.kind = try o.builder.string("wasm-import-name"),
- .value = try o.builder.string(decl.name.toSlice(ip)),
+ .value = try o.builder.string(nav.name.toSlice(ip)),
} }, &o.builder);
- if (decl.getOwnedExternFunc(zcu).?.lib_name.toSlice(ip)) |lib_name| {
- if (!std.mem.eql(u8, lib_name, "c")) try attributes.addFnAttr(.{ .string = .{
+ if (lib_name.toSlice(ip)) |lib_name_slice| {
+ if (!std.mem.eql(u8, lib_name_slice, "c")) try attributes.addFnAttr(.{ .string = .{
.kind = try o.builder.string("wasm-import-module"),
- .value = try o.builder.string(lib_name),
+ .value = try o.builder.string(lib_name_slice),
} }, &o.builder);
}
}
@@ -2901,8 +2903,8 @@ pub const Object = struct {
else => function_index.setCallConv(toLlvmCallConv(fn_info.cc, target), &o.builder),
}
- if (decl.alignment != .none)
- function_index.setAlignment(decl.alignment.toLlvm(), &o.builder);
+ if (resolved.alignment != .none)
+ function_index.setAlignment(resolved.alignment.toLlvm(), &o.builder);
// Function attributes that are independent of analysis results of the function body.
try o.addCommonFnAttributes(&attributes, owner_mod);
@@ -3006,15 +3008,15 @@ pub const Object = struct {
}
}
- fn resolveGlobalAnonDecl(
+ fn resolveGlobalUav(
o: *Object,
- decl_val: InternPool.Index,
+ uav: InternPool.Index,
llvm_addr_space: Builder.AddrSpace,
alignment: InternPool.Alignment,
) Error!Builder.Variable.Index {
assert(alignment != .none);
// TODO: Add address space to the anon_decl_map
- const gop = try o.anon_decl_map.getOrPut(o.gpa, decl_val);
+ const gop = try o.uav_map.getOrPut(o.gpa, uav);
if (gop.found_existing) {
// Keep the greater of the two alignments.
const variable_index = gop.value_ptr.ptr(&o.builder).kind.variable;
@@ -3023,19 +3025,19 @@ pub const Object = struct {
variable_index.setAlignment(max_alignment.toLlvm(), &o.builder);
return variable_index;
}
- errdefer assert(o.anon_decl_map.remove(decl_val));
+ errdefer assert(o.uav_map.remove(uav));
const mod = o.pt.zcu;
- const decl_ty = mod.intern_pool.typeOf(decl_val);
+ const decl_ty = mod.intern_pool.typeOf(uav);
const variable_index = try o.builder.addVariable(
- try o.builder.strtabStringFmt("__anon_{d}", .{@intFromEnum(decl_val)}),
+ try o.builder.strtabStringFmt("__anon_{d}", .{@intFromEnum(uav)}),
try o.lowerType(Type.fromInterned(decl_ty)),
llvm_addr_space,
);
gop.value_ptr.* = variable_index.ptrConst(&o.builder).global;
- try variable_index.setInitializer(try o.lowerValue(decl_val), &o.builder);
+ try variable_index.setInitializer(try o.lowerValue(uav), &o.builder);
variable_index.setLinkage(.internal, &o.builder);
variable_index.setMutability(.constant, &o.builder);
variable_index.setUnnamedAddr(.unnamed_addr, &o.builder);
@@ -3043,24 +3045,29 @@ pub const Object = struct {
return variable_index;
}
- fn resolveGlobalDecl(
+ fn resolveGlobalNav(
o: *Object,
- decl_index: InternPool.DeclIndex,
+ nav_index: InternPool.Nav.Index,
) Allocator.Error!Builder.Variable.Index {
- const gop = try o.decl_map.getOrPut(o.gpa, decl_index);
+ const gop = try o.nav_map.getOrPut(o.gpa, nav_index);
if (gop.found_existing) return gop.value_ptr.ptr(&o.builder).kind.variable;
- errdefer assert(o.decl_map.remove(decl_index));
+ errdefer assert(o.nav_map.remove(nav_index));
const pt = o.pt;
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
- const decl = zcu.declPtr(decl_index);
- const is_extern = decl.isExtern(zcu);
+ const nav = ip.getNav(nav_index);
+ const resolved = nav.status.resolved;
+ const is_extern, const is_threadlocal, const is_weak_linkage = switch (ip.indexToKey(resolved.val)) {
+ .variable => |variable| .{ false, variable.is_threadlocal, variable.is_weak_linkage },
+ .@"extern" => |@"extern"| .{ true, @"extern".is_threadlocal, @"extern".is_weak_linkage },
+ else => .{ false, false, false },
+ };
const variable_index = try o.builder.addVariable(
- try o.builder.strtabString((if (is_extern) decl.name else decl.fqn).toSlice(ip)),
- try o.lowerType(decl.typeOf(zcu)),
- toLlvmGlobalAddressSpace(decl.@"addrspace", zcu.getTarget()),
+ try o.builder.strtabString((if (is_extern) nav.name else nav.fqn).toSlice(ip)),
+ try o.lowerType(Type.fromInterned(nav.typeOf(ip))),
+ toLlvmGlobalAddressSpace(resolved.@"addrspace", zcu.getTarget()),
);
gop.value_ptr.* = variable_index.ptrConst(&o.builder).global;
@@ -3068,15 +3075,9 @@ pub const Object = struct {
if (is_extern) {
variable_index.setLinkage(.external, &o.builder);
variable_index.setUnnamedAddr(.default, &o.builder);
- if (decl.val.getVariable(zcu)) |decl_var| {
- const decl_namespace = zcu.namespacePtr(decl.src_namespace);
- const single_threaded = decl_namespace.fileScope(zcu).mod.single_threaded;
- variable_index.setThreadLocal(
- if (decl_var.is_threadlocal and !single_threaded) .generaldynamic else .default,
- &o.builder,
- );
- if (decl_var.is_weak_linkage) variable_index.setLinkage(.extern_weak, &o.builder);
- }
+ if (is_threadlocal and !zcu.navFileScope(nav_index).mod.single_threaded)
+ variable_index.setThreadLocal(.generaldynamic, &o.builder);
+ if (is_weak_linkage) variable_index.setLinkage(.extern_weak, &o.builder);
} else {
variable_index.setLinkage(.internal, &o.builder);
variable_index.setUnnamedAddr(.unnamed_addr, &o.builder);
@@ -3286,8 +3287,6 @@ pub const Object = struct {
return int_ty;
}
- const decl = mod.declPtr(struct_type.decl.unwrap().?);
-
var llvm_field_types = std.ArrayListUnmanaged(Builder.Type){};
defer llvm_field_types.deinit(o.gpa);
// Although we can estimate how much capacity to add, these cannot be
@@ -3351,7 +3350,7 @@ pub const Object = struct {
);
}
- const ty = try o.builder.opaqueType(try o.builder.string(decl.fqn.toSlice(ip)));
+ const ty = try o.builder.opaqueType(try o.builder.string(t.containerTypeName(ip).toSlice(ip)));
try o.type_map.put(o.gpa, t.toIntern(), ty);
o.builder.namedTypeSetBody(
@@ -3440,8 +3439,6 @@ pub const Object = struct {
return enum_tag_ty;
}
- const decl = mod.declPtr(union_obj.decl);
-
const aligned_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[layout.most_aligned_field]);
const aligned_field_llvm_ty = try o.lowerType(aligned_field_ty);
@@ -3460,7 +3457,7 @@ pub const Object = struct {
};
if (layout.tag_size == 0) {
- const ty = try o.builder.opaqueType(try o.builder.string(decl.fqn.toSlice(ip)));
+ const ty = try o.builder.opaqueType(try o.builder.string(t.containerTypeName(ip).toSlice(ip)));
try o.type_map.put(o.gpa, t.toIntern(), ty);
o.builder.namedTypeSetBody(
@@ -3488,7 +3485,7 @@ pub const Object = struct {
llvm_fields_len += 1;
}
- const ty = try o.builder.opaqueType(try o.builder.string(decl.fqn.toSlice(ip)));
+ const ty = try o.builder.opaqueType(try o.builder.string(t.containerTypeName(ip).toSlice(ip)));
try o.type_map.put(o.gpa, t.toIntern(), ty);
o.builder.namedTypeSetBody(
@@ -3500,8 +3497,7 @@ pub const Object = struct {
.opaque_type => {
const gop = try o.type_map.getOrPut(o.gpa, t.toIntern());
if (!gop.found_existing) {
- const decl = mod.declPtr(ip.loadOpaqueType(t.toIntern()).decl);
- gop.value_ptr.* = try o.builder.opaqueType(try o.builder.string(decl.fqn.toSlice(ip)));
+ gop.value_ptr.* = try o.builder.opaqueType(try o.builder.string(t.containerTypeName(ip).toSlice(ip)));
}
return gop.value_ptr.*;
},
@@ -3512,7 +3508,7 @@ pub const Object = struct {
.undef,
.simple_value,
.variable,
- .extern_func,
+ .@"extern",
.func,
.int,
.err,
@@ -3632,15 +3628,13 @@ pub const Object = struct {
const ty = Type.fromInterned(val_key.typeOf());
switch (val_key) {
- .extern_func => |extern_func| {
- const fn_decl_index = extern_func.decl;
- const function_index = try o.resolveLlvmFunction(fn_decl_index);
+ .@"extern" => |@"extern"| {
+ const function_index = try o.resolveLlvmFunction(@"extern".owner_nav);
const ptr = function_index.ptrConst(&o.builder).global.toConst();
return o.builder.convConst(ptr, llvm_int_ty);
},
.func => |func| {
- const fn_decl_index = func.owner_decl;
- const function_index = try o.resolveLlvmFunction(fn_decl_index);
+ const function_index = try o.resolveLlvmFunction(func.owner_nav);
const ptr = function_index.ptrConst(&o.builder).global.toConst();
return o.builder.convConst(ptr, llvm_int_ty);
},
@@ -3783,14 +3777,12 @@ pub const Object = struct {
.enum_literal,
.empty_enum_value,
=> unreachable, // non-runtime values
- .extern_func => |extern_func| {
- const fn_decl_index = extern_func.decl;
- const function_index = try o.resolveLlvmFunction(fn_decl_index);
+ .@"extern" => |@"extern"| {
+ const function_index = try o.resolveLlvmFunction(@"extern".owner_nav);
return function_index.ptrConst(&o.builder).global.toConst();
},
.func => |func| {
- const fn_decl_index = func.owner_decl;
- const function_index = try o.resolveLlvmFunction(fn_decl_index);
+ const function_index = try o.resolveLlvmFunction(func.owner_nav);
return function_index.ptrConst(&o.builder).global.toConst();
},
.int => {
@@ -4284,14 +4276,14 @@ pub const Object = struct {
const ptr = zcu.intern_pool.indexToKey(ptr_val).ptr;
const offset: u64 = prev_offset + ptr.byte_offset;
return switch (ptr.base_addr) {
- .decl => |decl| {
- const base_ptr = try o.lowerDeclRefValue(decl);
+ .nav => |nav| {
+ const base_ptr = try o.lowerNavRefValue(nav);
return o.builder.gepConst(.inbounds, .i8, base_ptr, null, &.{
try o.builder.intConst(.i64, offset),
});
},
- .anon_decl => |ad| {
- const base_ptr = try o.lowerAnonDeclRef(ad);
+ .uav => |uav| {
+ const base_ptr = try o.lowerUavRef(uav);
return o.builder.gepConst(.inbounds, .i8, base_ptr, null, &.{
try o.builder.intConst(.i64, offset),
});
@@ -4332,39 +4324,37 @@ pub const Object = struct {
};
}
- /// This logic is very similar to `lowerDeclRefValue` but for anonymous declarations.
+ /// This logic is very similar to `lowerNavRefValue` but for anonymous declarations.
/// Maybe the logic could be unified.
- fn lowerAnonDeclRef(
+ fn lowerUavRef(
o: *Object,
- anon_decl: InternPool.Key.Ptr.BaseAddr.AnonDecl,
+ uav: InternPool.Key.Ptr.BaseAddr.Uav,
) Error!Builder.Constant {
const pt = o.pt;
const mod = pt.zcu;
const ip = &mod.intern_pool;
- const decl_val = anon_decl.val;
- const decl_ty = Type.fromInterned(ip.typeOf(decl_val));
+ const uav_val = uav.val;
+ const uav_ty = Type.fromInterned(ip.typeOf(uav_val));
const target = mod.getTarget();
- if (Value.fromInterned(decl_val).getFunction(mod)) |func| {
- _ = func;
- @panic("TODO");
- } else if (Value.fromInterned(decl_val).getExternFunc(mod)) |func| {
- _ = func;
- @panic("TODO");
+ switch (ip.indexToKey(uav_val)) {
+ .func => @panic("TODO"),
+ .@"extern" => @panic("TODO"),
+ else => {},
}
- const ptr_ty = Type.fromInterned(anon_decl.orig_ty);
+ const ptr_ty = Type.fromInterned(uav.orig_ty);
- const is_fn_body = decl_ty.zigTypeTag(mod) == .Fn;
- if ((!is_fn_body and !decl_ty.hasRuntimeBits(pt)) or
- (is_fn_body and mod.typeToFunc(decl_ty).?.is_generic)) return o.lowerPtrToVoid(ptr_ty);
+ const is_fn_body = uav_ty.zigTypeTag(mod) == .Fn;
+ if ((!is_fn_body and !uav_ty.hasRuntimeBits(pt)) or
+ (is_fn_body and mod.typeToFunc(uav_ty).?.is_generic)) return o.lowerPtrToVoid(ptr_ty);
if (is_fn_body)
@panic("TODO");
const llvm_addr_space = toLlvmAddressSpace(ptr_ty.ptrAddressSpace(mod), target);
const alignment = ptr_ty.ptrAlignment(pt);
- const llvm_global = (try o.resolveGlobalAnonDecl(decl_val, llvm_addr_space, alignment)).ptrConst(&o.builder).global;
+ const llvm_global = (try o.resolveGlobalUav(uav.val, llvm_addr_space, alignment)).ptrConst(&o.builder).global;
const llvm_val = try o.builder.convConst(
llvm_global.toConst(),
@@ -4374,44 +4364,41 @@ pub const Object = struct {
return o.builder.convConst(llvm_val, try o.lowerType(ptr_ty));
}
- fn lowerDeclRefValue(o: *Object, decl_index: InternPool.DeclIndex) Allocator.Error!Builder.Constant {
+ fn lowerNavRefValue(o: *Object, nav_index: InternPool.Nav.Index) Allocator.Error!Builder.Constant {
const pt = o.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
+ const ip = &zcu.intern_pool;
// In the case of something like:
// fn foo() void {}
// const bar = foo;
// ... &bar;
// `bar` is just an alias and we actually want to lower a reference to `foo`.
- const decl = mod.declPtr(decl_index);
- if (decl.val.getFunction(mod)) |func| {
- if (func.owner_decl != decl_index) {
- return o.lowerDeclRefValue(func.owner_decl);
- }
- } else if (decl.val.getExternFunc(mod)) |func| {
- if (func.decl != decl_index) {
- return o.lowerDeclRefValue(func.decl);
- }
- }
+ const owner_nav_index = switch (ip.indexToKey(zcu.navValue(nav_index).toIntern())) {
+ .func => |func| func.owner_nav,
+ .@"extern" => |@"extern"| @"extern".owner_nav,
+ else => nav_index,
+ };
+ const owner_nav = ip.getNav(owner_nav_index);
- const decl_ty = decl.typeOf(mod);
- const ptr_ty = try decl.declPtrType(pt);
+ const nav_ty = Type.fromInterned(owner_nav.typeOf(ip));
+ const ptr_ty = try pt.navPtrType(owner_nav_index);
- const is_fn_body = decl_ty.zigTypeTag(mod) == .Fn;
- if ((!is_fn_body and !decl_ty.hasRuntimeBits(pt)) or
- (is_fn_body and mod.typeToFunc(decl_ty).?.is_generic))
+ const is_fn_body = nav_ty.zigTypeTag(zcu) == .Fn;
+ if ((!is_fn_body and !nav_ty.hasRuntimeBits(pt)) or
+ (is_fn_body and zcu.typeToFunc(nav_ty).?.is_generic))
{
return o.lowerPtrToVoid(ptr_ty);
}
const llvm_global = if (is_fn_body)
- (try o.resolveLlvmFunction(decl_index)).ptrConst(&o.builder).global
+ (try o.resolveLlvmFunction(owner_nav_index)).ptrConst(&o.builder).global
else
- (try o.resolveGlobalDecl(decl_index)).ptrConst(&o.builder).global;
+ (try o.resolveGlobalNav(owner_nav_index)).ptrConst(&o.builder).global;
const llvm_val = try o.builder.convConst(
llvm_global.toConst(),
- try o.builder.ptrType(toLlvmAddressSpace(decl.@"addrspace", mod.getTarget())),
+ try o.builder.ptrType(toLlvmAddressSpace(owner_nav.status.resolved.@"addrspace", zcu.getTarget())),
);
return o.builder.convConst(llvm_val, try o.lowerType(ptr_ty));
@@ -4553,18 +4540,16 @@ pub const Object = struct {
const ip = &zcu.intern_pool;
const enum_type = ip.loadEnumType(enum_ty.toIntern());
- // TODO: detect when the type changes and re-emit this function.
- const gop = try o.decl_map.getOrPut(o.gpa, enum_type.decl);
+ const gop = try o.enum_tag_name_map.getOrPut(o.gpa, enum_ty.toIntern());
if (gop.found_existing) return gop.value_ptr.ptrConst(&o.builder).kind.function;
- errdefer assert(o.decl_map.remove(enum_type.decl));
+ errdefer assert(o.enum_tag_name_map.remove(enum_ty.toIntern()));
const usize_ty = try o.lowerType(Type.usize);
const ret_ty = try o.lowerType(Type.slice_const_u8_sentinel_0);
- const decl = zcu.declPtr(enum_type.decl);
const target = zcu.root_mod.resolved_target.result;
const function_index = try o.builder.addFunction(
try o.builder.fnType(ret_ty, &.{try o.lowerType(Type.fromInterned(enum_type.tag_ty))}, .normal),
- try o.builder.strtabStringFmt("__zig_tag_name_{}", .{decl.fqn.fmt(ip)}),
+ try o.builder.strtabStringFmt("__zig_tag_name_{}", .{enum_type.name.fmt(ip)}),
toLlvmAddressSpace(.generic, target),
);
@@ -4624,86 +4609,73 @@ pub const Object = struct {
}
};
-pub const DeclGen = struct {
+pub const NavGen = struct {
object: *Object,
- decl: *Zcu.Decl,
- decl_index: InternPool.DeclIndex,
+ nav_index: InternPool.Nav.Index,
err_msg: ?*Zcu.ErrorMsg,
- fn ownerModule(dg: DeclGen) *Package.Module {
- const o = dg.object;
- const zcu = o.pt.zcu;
- const namespace = zcu.namespacePtr(dg.decl.src_namespace);
- const file_scope = namespace.fileScope(zcu);
- return file_scope.mod;
+ fn ownerModule(ng: NavGen) *Package.Module {
+ return ng.object.pt.zcu.navFileScope(ng.nav_index).mod;
}
- fn todo(dg: *DeclGen, comptime format: []const u8, args: anytype) Error {
+ fn todo(ng: *NavGen, comptime format: []const u8, args: anytype) Error {
@setCold(true);
- assert(dg.err_msg == null);
- const o = dg.object;
+ assert(ng.err_msg == null);
+ const o = ng.object;
const gpa = o.gpa;
- const src_loc = dg.decl.navSrcLoc(o.pt.zcu);
- dg.err_msg = try Zcu.ErrorMsg.create(gpa, src_loc, "TODO (LLVM): " ++ format, args);
+ const src_loc = o.pt.zcu.navSrcLoc(ng.nav_index);
+ ng.err_msg = try Zcu.ErrorMsg.create(gpa, src_loc, "TODO (LLVM): " ++ format, args);
return error.CodegenFail;
}
- fn genDecl(dg: *DeclGen) !void {
- const o = dg.object;
+ fn genDecl(ng: *NavGen) !void {
+ const o = ng.object;
const pt = o.pt;
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
- const decl = dg.decl;
- const decl_index = dg.decl_index;
- assert(decl.has_tv);
+ const nav_index = ng.nav_index;
+ const nav = ip.getNav(nav_index);
+ const resolved = nav.status.resolved;
+
+ const is_extern, const lib_name, const is_threadlocal, const is_weak_linkage, const is_const, const init_val, const owner_nav = switch (ip.indexToKey(resolved.val)) {
+ .variable => |variable| .{ false, variable.lib_name, variable.is_threadlocal, variable.is_weak_linkage, false, variable.init, variable.owner_nav },
+ .@"extern" => |@"extern"| .{ true, @"extern".lib_name, @"extern".is_threadlocal, @"extern".is_weak_linkage, @"extern".is_const, .none, @"extern".owner_nav },
+ else => .{ false, .none, false, false, true, resolved.val, nav_index },
+ };
+ const ty = Type.fromInterned(nav.typeOf(ip));
- if (decl.val.getExternFunc(zcu)) |extern_func| {
- _ = try o.resolveLlvmFunction(extern_func.decl);
+ if (is_extern and ip.isFunctionType(ty.toIntern())) {
+ _ = try o.resolveLlvmFunction(owner_nav);
} else {
- const variable_index = try o.resolveGlobalDecl(decl_index);
- variable_index.setAlignment(
- decl.getAlignment(pt).toLlvm(),
- &o.builder,
- );
- if (decl.@"linksection".toSlice(ip)) |section|
+ const variable_index = try o.resolveGlobalNav(nav_index);
+ variable_index.setAlignment(pt.navAlignment(nav_index).toLlvm(), &o.builder);
+ if (resolved.@"linksection".toSlice(ip)) |section|
variable_index.setSection(try o.builder.string(section), &o.builder);
- assert(decl.has_tv);
- const init_val = if (decl.val.getVariable(zcu)) |decl_var| decl_var.init else init_val: {
- variable_index.setMutability(.constant, &o.builder);
- break :init_val decl.val.toIntern();
- };
+ if (is_const) variable_index.setMutability(.constant, &o.builder);
try variable_index.setInitializer(switch (init_val) {
.none => .no_init,
else => try o.lowerValue(init_val),
}, &o.builder);
- if (decl.val.getVariable(zcu)) |decl_var| {
- const decl_namespace = zcu.namespacePtr(decl.src_namespace);
- const single_threaded = decl_namespace.fileScope(zcu).mod.single_threaded;
- variable_index.setThreadLocal(
- if (decl_var.is_threadlocal and !single_threaded) .generaldynamic else .default,
- &o.builder,
- );
- }
-
- const line_number = decl.navSrcLine(zcu) + 1;
+ const file_scope = zcu.navFileScopeIndex(nav_index);
+ const mod = zcu.fileByIndex(file_scope).mod;
+ if (is_threadlocal and !mod.single_threaded)
+ variable_index.setThreadLocal(.generaldynamic, &o.builder);
- const namespace = zcu.namespacePtr(decl.src_namespace);
- const file_scope = namespace.fileScope(zcu);
- const owner_mod = file_scope.mod;
+ const line_number = zcu.navSrcLine(nav_index) + 1;
- if (!owner_mod.strip) {
+ if (!mod.strip) {
const debug_file = try o.getDebugFile(file_scope);
const debug_global_var = try o.builder.debugGlobalVar(
- try o.builder.metadataString(decl.name.toSlice(ip)), // Name
+ try o.builder.metadataString(nav.name.toSlice(ip)), // Name
try o.builder.metadataStringFromStrtabString(variable_index.name(&o.builder)), // Linkage name
debug_file, // File
debug_file, // Scope
line_number,
- try o.lowerDebugType(decl.typeOf(zcu)),
+ try o.lowerDebugType(ty),
variable_index,
- .{ .local = !decl.isExtern(zcu) },
+ .{ .local = !is_extern },
);
const debug_expression = try o.builder.debugExpression(&.{});
@@ -4718,18 +4690,18 @@ pub const DeclGen = struct {
}
}
- if (decl.isExtern(zcu)) {
- const global_index = o.decl_map.get(decl_index).?;
+ if (is_extern) {
+ const global_index = o.nav_map.get(nav_index).?;
const decl_name = decl_name: {
- if (zcu.getTarget().isWasm() and decl.typeOf(zcu).zigTypeTag(zcu) == .Fn) {
- if (decl.getOwnedExternFunc(zcu).?.lib_name.toSlice(ip)) |lib_name| {
- if (!std.mem.eql(u8, lib_name, "c")) {
- break :decl_name try o.builder.strtabStringFmt("{}|{s}", .{ decl.name.fmt(ip), lib_name });
+ if (zcu.getTarget().isWasm() and ty.zigTypeTag(zcu) == .Fn) {
+ if (lib_name.toSlice(ip)) |lib_name_slice| {
+ if (!std.mem.eql(u8, lib_name_slice, "c")) {
+ break :decl_name try o.builder.strtabStringFmt("{}|{s}", .{ nav.name.fmt(ip), lib_name_slice });
}
}
}
- break :decl_name try o.builder.strtabString(decl.name.toSlice(ip));
+ break :decl_name try o.builder.strtabString(nav.name.toSlice(ip));
};
if (o.builder.getGlobal(decl_name)) |other_global| {
@@ -4746,16 +4718,14 @@ pub const DeclGen = struct {
if (zcu.comp.config.dll_export_fns)
global_index.setDllStorageClass(.default, &o.builder);
- if (decl.val.getVariable(zcu)) |decl_var| {
- if (decl_var.is_weak_linkage) global_index.setLinkage(.extern_weak, &o.builder);
- }
+ if (is_weak_linkage) global_index.setLinkage(.extern_weak, &o.builder);
}
}
};
pub const FuncGen = struct {
gpa: Allocator,
- dg: *DeclGen,
+ ng: *NavGen,
air: Air,
liveness: Liveness,
wip: Builder.WipFunction,
@@ -4815,7 +4785,7 @@ pub const FuncGen = struct {
fn todo(self: *FuncGen, comptime format: []const u8, args: anytype) Error {
@setCold(true);
- return self.dg.todo(format, args);
+ return self.ng.todo(format, args);
}
fn resolveInst(self: *FuncGen, inst: Air.Inst.Ref) !Builder.Value {
@@ -4823,13 +4793,13 @@ pub const FuncGen = struct {
const gop = try self.func_inst_table.getOrPut(gpa, inst);
if (gop.found_existing) return gop.value_ptr.*;
- const llvm_val = try self.resolveValue((try self.air.value(inst, self.dg.object.pt)).?);
+ const llvm_val = try self.resolveValue((try self.air.value(inst, self.ng.object.pt)).?);
gop.value_ptr.* = llvm_val.toValue();
return llvm_val.toValue();
}
fn resolveValue(self: *FuncGen, val: Value) Error!Builder.Constant {
- const o = self.dg.object;
+ const o = self.ng.object;
const pt = o.pt;
const ty = val.typeOf(pt.zcu);
const llvm_val = try o.lowerValue(val.toIntern());
@@ -4855,7 +4825,7 @@ pub const FuncGen = struct {
}
fn resolveNullOptUsize(self: *FuncGen) Error!Builder.Constant {
- const o = self.dg.object;
+ const o = self.ng.object;
const pt = o.pt;
if (o.null_opt_usize == .no_init) {
o.null_opt_usize = try self.resolveValue(Value.fromInterned(try pt.intern(.{ .opt = .{
@@ -4867,7 +4837,7 @@ pub const FuncGen = struct {
}
fn genBody(self: *FuncGen, body: []const Air.Inst.Index) Error!void {
- const o = self.dg.object;
+ const o = self.ng.object;
const mod = o.pt.zcu;
const ip = &mod.intern_pool;
const air_tags = self.air.instructions.items(.tag);
@@ -5132,20 +5102,19 @@ pub const FuncGen = struct {
defer self.scope = old_scope;
if (maybe_inline_func) |inline_func| {
- const o = self.dg.object;
+ const o = self.ng.object;
const pt = o.pt;
const zcu = pt.zcu;
+ const ip = &zcu.intern_pool;
const func = zcu.funcInfo(inline_func);
- const decl_index = func.owner_decl;
- const decl = zcu.declPtr(decl_index);
- const namespace = zcu.namespacePtr(decl.src_namespace);
- const file_scope = namespace.fileScope(zcu);
- const owner_mod = file_scope.mod;
+ const nav = ip.getNav(func.owner_nav);
+ const file_scope = zcu.navFileScopeIndex(func.owner_nav);
+ const mod = zcu.fileByIndex(file_scope).mod;
self.file = try o.getDebugFile(file_scope);
- const line_number = decl.navSrcLine(zcu) + 1;
+ const line_number = zcu.navSrcLine(func.owner_nav) + 1;
self.inlined = self.wip.debug_location;
const fn_ty = try pt.funcType(.{
@@ -5155,15 +5124,15 @@ pub const FuncGen = struct {
self.scope = try o.builder.debugSubprogram(
self.file,
- try o.builder.metadataString(decl.name.toSlice(&zcu.intern_pool)),
- try o.builder.metadataString(decl.fqn.toSlice(&zcu.intern_pool)),
+ try o.builder.metadataString(nav.name.toSlice(&zcu.intern_pool)),
+ try o.builder.metadataString(nav.fqn.toSlice(&zcu.intern_pool)),
line_number,
line_number + func.lbrace_line,
try o.lowerDebugType(fn_ty),
.{
.di_flags = .{ .StaticMember = true },
.sp_flags = .{
- .Optimized = owner_mod.optimize_mode != .Debug,
+ .Optimized = mod.optimize_mode != .Debug,
.Definition = true,
.LocalToUnit = true, // TODO: we can't know this at this point, since the function could be exported later!
},
@@ -5171,7 +5140,7 @@ pub const FuncGen = struct {
o.debug_compile_unit,
);
- self.base_line = decl.navSrcLine(zcu);
+ self.base_line = zcu.navSrcLine(func.owner_nav);
const inlined_at_location = try self.wip.debug_location.toMetadata(&o.builder);
self.wip.debug_location = .{
.location = .{
@@ -5183,7 +5152,7 @@ pub const FuncGen = struct {
};
}
- self.scope = try self.dg.object.builder.debugLexicalBlock(
+ self.scope = try self.ng.object.builder.debugLexicalBlock(
self.scope,
self.file,
self.prev_dbg_line,
@@ -5214,7 +5183,7 @@ pub const FuncGen = struct {
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = self.air.extraData(Air.Call, pl_op.payload);
const args: []const Air.Inst.Ref = @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len]);
- const o = self.dg.object;
+ const o = self.ng.object;
const pt = o.pt;
const mod = pt.zcu;
const ip = &mod.intern_pool;
@@ -5515,14 +5484,15 @@ pub const FuncGen = struct {
}
fn buildSimplePanic(fg: *FuncGen, panic_id: Zcu.PanicId) !void {
- const o = fg.dg.object;
- const mod = o.pt.zcu;
- const msg_decl_index = mod.panic_messages[@intFromEnum(panic_id)].unwrap().?;
- const msg_decl = mod.declPtr(msg_decl_index);
- const msg_len = msg_decl.typeOf(mod).childType(mod).arrayLen(mod);
- const msg_ptr = try o.lowerValue(msg_decl.val.toIntern());
+ const o = fg.ng.object;
+ const zcu = o.pt.zcu;
+ const ip = &zcu.intern_pool;
+ const msg_nav_index = zcu.panic_messages[@intFromEnum(panic_id)].unwrap().?;
+ const msg_nav = ip.getNav(msg_nav_index);
+ const msg_len = Type.fromInterned(msg_nav.typeOf(ip)).childType(zcu).arrayLen(zcu);
+ const msg_ptr = try o.lowerValue(msg_nav.status.resolved.val);
const null_opt_addr_global = try fg.resolveNullOptUsize();
- const target = mod.getTarget();
+ const target = zcu.getTarget();
const llvm_usize = try o.lowerType(Type.usize);
// example:
// call fastcc void @test2.panic(
@@ -5531,10 +5501,10 @@ pub const FuncGen = struct {
// ptr null, ; stack trace
// ptr @2, ; addr (null ?usize)
// )
- const panic_func = mod.funcInfo(mod.panic_func_index);
- const panic_decl = mod.declPtr(panic_func.owner_decl);
- const fn_info = mod.typeToFunc(panic_decl.typeOf(mod)).?;
- const panic_global = try o.resolveLlvmFunction(panic_func.owner_decl);
+ const panic_func = zcu.funcInfo(zcu.panic_func_index);
+ const panic_nav = ip.getNav(panic_func.owner_nav);
+ const fn_info = zcu.typeToFunc(Type.fromInterned(panic_nav.typeOf(ip))).?;
+ const panic_global = try o.resolveLlvmFunction(panic_func.owner_nav);
_ = try fg.wip.call(
.normal,
toLlvmCallConv(fn_info.cc, target),
@@ -5553,9 +5523,10 @@ pub const FuncGen = struct {
}
fn airRet(self: *FuncGen, inst: Air.Inst.Index, safety: bool) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const pt = o.pt;
const mod = pt.zcu;
+ const ip = &mod.intern_pool;
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const ret_ty = self.typeOf(un_op);
@@ -5581,7 +5552,7 @@ pub const FuncGen = struct {
len,
if (ptr_ty.isVolatilePtr(mod)) .@"volatile" else .normal,
);
- const owner_mod = self.dg.ownerModule();
+ const owner_mod = self.ng.ownerModule();
if (owner_mod.valgrind) {
try self.valgrindMarkUndef(self.ret_ptr, len);
}
@@ -5602,7 +5573,7 @@ pub const FuncGen = struct {
_ = try self.wip.retVoid();
return .none;
}
- const fn_info = mod.typeToFunc(self.dg.decl.typeOf(mod)).?;
+ const fn_info = mod.typeToFunc(Type.fromInterned(ip.getNav(self.ng.nav_index).typeOf(ip))).?;
if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt)) {
if (Type.fromInterned(fn_info.return_type).isError(mod)) {
// Functions with an empty error set are emitted with an error code
@@ -5631,7 +5602,7 @@ pub const FuncGen = struct {
len,
.normal,
);
- const owner_mod = self.dg.ownerModule();
+ const owner_mod = self.ng.ownerModule();
if (owner_mod.valgrind) {
try self.valgrindMarkUndef(rp, len);
}
@@ -5659,13 +5630,14 @@ pub const FuncGen = struct {
}
fn airRetLoad(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const pt = o.pt;
const mod = pt.zcu;
+ const ip = &mod.intern_pool;
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const ptr_ty = self.typeOf(un_op);
const ret_ty = ptr_ty.childType(mod);
- const fn_info = mod.typeToFunc(self.dg.decl.typeOf(mod)).?;
+ const fn_info = mod.typeToFunc(Type.fromInterned(ip.getNav(self.ng.nav_index).typeOf(ip))).?;
if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt)) {
if (Type.fromInterned(fn_info.return_type).isError(mod)) {
// Functions with an empty error set are emitted with an error code
@@ -5689,7 +5661,7 @@ pub const FuncGen = struct {
}
fn airCVaArg(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const list = try self.resolveInst(ty_op.operand);
const arg_ty = ty_op.ty.toType();
@@ -5699,7 +5671,7 @@ pub const FuncGen = struct {
}
fn airCVaCopy(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const pt = o.pt;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const src_list = try self.resolveInst(ty_op.operand);
@@ -5725,7 +5697,7 @@ pub const FuncGen = struct {
}
fn airCVaStart(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const pt = o.pt;
const va_list_ty = self.typeOfIndex(inst);
const llvm_va_list_ty = try o.lowerType(va_list_ty);
@@ -5767,7 +5739,7 @@ pub const FuncGen = struct {
}
fn airCmpLtErrorsLen(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const operand = try self.resolveInst(un_op);
const llvm_fn = try o.getCmpLtErrorsLenFunction();
@@ -5790,7 +5762,7 @@ pub const FuncGen = struct {
lhs: Builder.Value,
rhs: Builder.Value,
) Allocator.Error!Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const pt = o.pt;
const mod = pt.zcu;
const scalar_ty = operand_ty.scalarType(mod);
@@ -5897,7 +5869,7 @@ pub const FuncGen = struct {
maybe_inline_func: ?InternPool.Index,
body: []const Air.Inst.Index,
) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const pt = o.pt;
const mod = pt.zcu;
const inst_ty = self.typeOfIndex(inst);
@@ -5948,7 +5920,7 @@ pub const FuncGen = struct {
}
fn airBr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const pt = o.pt;
const branch = self.air.instructions.items(.data)[@intFromEnum(inst)].br;
const block = self.blocks.get(branch.block_inst).?;
@@ -5988,7 +5960,7 @@ pub const FuncGen = struct {
}
fn airTry(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const pt = o.pt;
const inst = body_tail[0];
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
@@ -6003,7 +5975,7 @@ pub const FuncGen = struct {
}
fn airTryPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const mod = o.pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.TryPtr, ty_pl.payload);
@@ -6023,7 +5995,7 @@ pub const FuncGen = struct {
can_elide_load: bool,
is_unused: bool,
) !Builder.Value {
- const o = fg.dg.object;
+ const o = fg.ng.object;
const pt = o.pt;
const mod = pt.zcu;
const payload_ty = err_union_ty.errorUnionPayload(mod);
@@ -6088,7 +6060,7 @@ pub const FuncGen = struct {
}
fn airSwitchBr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const cond = try self.resolveInst(pl_op.operand);
const switch_br = self.air.extraData(Air.SwitchBr, pl_op.payload);
@@ -6152,7 +6124,7 @@ pub const FuncGen = struct {
}
fn airLoop(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const mod = o.pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const loop = self.air.extraData(Air.Block, ty_pl.payload);
@@ -6176,7 +6148,7 @@ pub const FuncGen = struct {
}
fn airArrayToSlice(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const pt = o.pt;
const mod = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
@@ -6195,7 +6167,7 @@ pub const FuncGen = struct {
}
fn airFloatFromInt(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const pt = o.pt;
const mod = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
@@ -6280,7 +6252,7 @@ pub const FuncGen = struct {
) !Builder.Value {
_ = fast;
- const o = self.dg.object;
+ const o = self.ng.object;
const pt = o.pt;
const mod = pt.zcu;
const target = mod.getTarget();
@@ -6342,13 +6314,13 @@ pub const FuncGen = struct {
}
fn sliceOrArrayPtr(fg: *FuncGen, ptr: Builder.Value, ty: Type) Allocator.Error!Builder.Value {
- const o = fg.dg.object;
+ const o = fg.ng.object;
const mod = o.pt.zcu;
return if (ty.isSlice(mod)) fg.wip.extractValue(ptr, &.{0}, "") else ptr;
}
fn sliceOrArrayLenInBytes(fg: *FuncGen, ptr: Builder.Value, ty: Type) Allocator.Error!Builder.Value {
- const o = fg.dg.object;
+ const o = fg.ng.object;
const pt = o.pt;
const mod = pt.zcu;
const llvm_usize = try o.lowerType(Type.usize);
@@ -6378,7 +6350,7 @@ pub const FuncGen = struct {
}
fn airPtrSliceFieldPtr(self: *FuncGen, inst: Air.Inst.Index, index: c_uint) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const mod = o.pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const slice_ptr = try self.resolveInst(ty_op.operand);
@@ -6389,7 +6361,7 @@ pub const FuncGen = struct {
}
fn airSliceElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const pt = o.pt;
const mod = pt.zcu;
const inst = body_tail[0];
@@ -6413,7 +6385,7 @@ pub const FuncGen = struct {
}
fn airSliceElemPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const mod = o.pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
@@ -6427,7 +6399,7 @@ pub const FuncGen = struct {
}
fn airArrayElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const pt = o.pt;
const mod = pt.zcu;
const inst = body_tail[0];
@@ -6460,7 +6432,7 @@ pub const FuncGen = struct {
}
fn airPtrElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const pt = o.pt;
const mod = pt.zcu;
const inst = body_tail[0];
@@ -6486,7 +6458,7 @@ pub const FuncGen = struct {
}
fn airPtrElemPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const pt = o.pt;
const mod = pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
@@ -6529,7 +6501,7 @@ pub const FuncGen = struct {
}
fn airStructFieldVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const pt = o.pt;
const mod = pt.zcu;
const inst = body_tail[0];
@@ -6635,7 +6607,7 @@ pub const FuncGen = struct {
}
fn airFieldParentPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const pt = o.pt;
const mod = pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
@@ -6697,7 +6669,7 @@ pub const FuncGen = struct {
}
fn airDbgVarPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const mod = o.pt.zcu;
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const operand = try self.resolveInst(pl_op.operand);
@@ -6729,7 +6701,7 @@ pub const FuncGen = struct {
}
fn airDbgVarVal(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const operand = try self.resolveInst(pl_op.operand);
const operand_ty = self.typeOf(pl_op.operand);
@@ -6746,7 +6718,7 @@ pub const FuncGen = struct {
);
const pt = o.pt;
- const owner_mod = self.dg.ownerModule();
+ const owner_mod = self.ng.ownerModule();
if (isByRef(operand_ty, pt)) {
_ = try self.wip.callIntrinsic(
.normal,
@@ -6800,7 +6772,7 @@ pub const FuncGen = struct {
// We don't have such an assembler implemented yet though. For now,
// this implementation feeds the inline assembly code directly to LLVM.
- const o = self.dg.object;
+ const o = self.ng.object;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Asm, ty_pl.payload);
const is_volatile = @as(u1, @truncate(extra.data.flags >> 31)) != 0;
@@ -7181,7 +7153,7 @@ pub const FuncGen = struct {
operand_is_ptr: bool,
cond: Builder.IntegerCondition,
) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const pt = o.pt;
const mod = pt.zcu;
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
@@ -7226,7 +7198,7 @@ pub const FuncGen = struct {
cond: Builder.IntegerCondition,
operand_is_ptr: bool,
) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const pt = o.pt;
const mod = pt.zcu;
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
@@ -7266,7 +7238,7 @@ pub const FuncGen = struct {
}
fn airOptionalPayloadPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const pt = o.pt;
const mod = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
@@ -7288,7 +7260,7 @@ pub const FuncGen = struct {
fn airOptionalPayloadPtrSet(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
comptime assert(optional_layout_version == 3);
- const o = self.dg.object;
+ const o = self.ng.object;
const pt = o.pt;
const mod = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
@@ -7320,7 +7292,7 @@ pub const FuncGen = struct {
}
fn airOptionalPayload(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const pt = o.pt;
const mod = pt.zcu;
const inst = body_tail[0];
@@ -7345,7 +7317,7 @@ pub const FuncGen = struct {
body_tail: []const Air.Inst.Index,
operand_is_ptr: bool,
) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const pt = o.pt;
const mod = pt.zcu;
const inst = body_tail[0];
@@ -7381,7 +7353,7 @@ pub const FuncGen = struct {
inst: Air.Inst.Index,
operand_is_ptr: bool,
) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const pt = o.pt;
const mod = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
@@ -7415,7 +7387,7 @@ pub const FuncGen = struct {
}
fn airErrUnionPayloadPtrSet(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const pt = o.pt;
const mod = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
@@ -7456,7 +7428,7 @@ pub const FuncGen = struct {
}
fn airSaveErrReturnTraceIndex(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const pt = o.pt;
const mod = pt.zcu;
@@ -7502,7 +7474,7 @@ pub const FuncGen = struct {
}
fn airWrapOptional(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const pt = o.pt;
const mod = pt.zcu;
const inst = body_tail[0];
@@ -7536,7 +7508,7 @@ pub const FuncGen = struct {
}
fn airWrapErrUnionPayload(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const pt = o.pt;
const inst = body_tail[0];
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
@@ -7577,7 +7549,7 @@ pub const FuncGen = struct {
}
fn airWrapErrUnionErr(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const pt = o.pt;
const mod = pt.zcu;
const inst = body_tail[0];
@@ -7618,7 +7590,7 @@ pub const FuncGen = struct {
}
fn airWasmMemorySize(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const index = pl_op.payload;
const llvm_usize = try o.lowerType(Type.usize);
@@ -7628,7 +7600,7 @@ pub const FuncGen = struct {
}
fn airWasmMemoryGrow(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const index = pl_op.payload;
const llvm_isize = try o.lowerType(Type.isize);
@@ -7638,7 +7610,7 @@ pub const FuncGen = struct {
}
fn airVectorStoreElem(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const pt = o.pt;
const mod = pt.zcu;
const data = self.air.instructions.items(.data)[@intFromEnum(inst)].vector_store_elem;
@@ -7661,7 +7633,7 @@ pub const FuncGen = struct {
}
fn airMin(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const mod = o.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
@@ -7681,7 +7653,7 @@ pub const FuncGen = struct {
}
fn airMax(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const mod = o.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
@@ -7701,7 +7673,7 @@ pub const FuncGen = struct {
}
fn airSlice(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const ptr = try self.resolveInst(bin_op.lhs);
@@ -7711,7 +7683,7 @@ pub const FuncGen = struct {
}
fn airAdd(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const mod = o.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
@@ -7729,7 +7701,7 @@ pub const FuncGen = struct {
signed_intrinsic: Builder.Intrinsic,
unsigned_intrinsic: Builder.Intrinsic,
) !Builder.Value {
- const o = fg.dg.object;
+ const o = fg.ng.object;
const mod = o.pt.zcu;
const bin_op = fg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
@@ -7777,7 +7749,7 @@ pub const FuncGen = struct {
}
fn airAddSat(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const mod = o.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
@@ -7797,7 +7769,7 @@ pub const FuncGen = struct {
}
fn airSub(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const mod = o.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
@@ -7818,7 +7790,7 @@ pub const FuncGen = struct {
}
fn airSubSat(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const mod = o.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
@@ -7838,7 +7810,7 @@ pub const FuncGen = struct {
}
fn airMul(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const mod = o.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
@@ -7859,7 +7831,7 @@ pub const FuncGen = struct {
}
fn airMulSat(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const mod = o.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
@@ -7888,7 +7860,7 @@ pub const FuncGen = struct {
}
fn airDivTrunc(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const mod = o.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
@@ -7904,7 +7876,7 @@ pub const FuncGen = struct {
}
fn airDivFloor(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const mod = o.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
@@ -7936,7 +7908,7 @@ pub const FuncGen = struct {
}
fn airDivExact(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const mod = o.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
@@ -7954,7 +7926,7 @@ pub const FuncGen = struct {
}
fn airRem(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const mod = o.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
@@ -7971,7 +7943,7 @@ pub const FuncGen = struct {
}
fn airMod(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const mod = o.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
@@ -8007,7 +7979,7 @@ pub const FuncGen = struct {
}
fn airPtrAdd(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const mod = o.pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
@@ -8029,7 +8001,7 @@ pub const FuncGen = struct {
}
fn airPtrSub(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const mod = o.pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
@@ -8057,7 +8029,7 @@ pub const FuncGen = struct {
signed_intrinsic: Builder.Intrinsic,
unsigned_intrinsic: Builder.Intrinsic,
) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const pt = o.pt;
const mod = pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
@@ -8111,7 +8083,7 @@ pub const FuncGen = struct {
result_vector: Builder.Value,
vector_len: usize,
) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
assert(args_vectors.len <= 3);
var i: usize = 0;
@@ -8143,7 +8115,7 @@ pub const FuncGen = struct {
param_types: []const Builder.Type,
return_type: Builder.Type,
) Allocator.Error!Builder.Function.Index {
- const o = self.dg.object;
+ const o = self.ng.object;
if (o.builder.getGlobal(fn_name)) |global| return switch (global.ptrConst(&o.builder).kind) {
.alias => |alias| alias.getAliasee(&o.builder).ptrConst(&o.builder).kind.function,
.function => |function| function,
@@ -8165,7 +8137,7 @@ pub const FuncGen = struct {
ty: Type,
params: [2]Builder.Value,
) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const mod = o.pt.zcu;
const target = mod.getTarget();
const scalar_ty = ty.scalarType(mod);
@@ -8271,7 +8243,7 @@ pub const FuncGen = struct {
comptime params_len: usize,
params: [params_len]Builder.Value,
) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const mod = o.pt.zcu;
const target = mod.getTarget();
const scalar_ty = ty.scalarType(mod);
@@ -8412,7 +8384,7 @@ pub const FuncGen = struct {
}
fn airShlWithOverflow(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const pt = o.pt;
const mod = pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
@@ -8483,7 +8455,7 @@ pub const FuncGen = struct {
}
fn airShlExact(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const mod = o.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
@@ -8501,7 +8473,7 @@ pub const FuncGen = struct {
}
fn airShl(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
@@ -8514,7 +8486,7 @@ pub const FuncGen = struct {
}
fn airShlSat(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const pt = o.pt;
const mod = pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
@@ -8557,7 +8529,7 @@ pub const FuncGen = struct {
}
fn airShr(self: *FuncGen, inst: Air.Inst.Index, is_exact: bool) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const mod = o.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
@@ -8576,7 +8548,7 @@ pub const FuncGen = struct {
}
fn airAbs(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const mod = o.pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try self.resolveInst(ty_op.operand);
@@ -8598,7 +8570,7 @@ pub const FuncGen = struct {
}
fn airIntCast(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const mod = o.pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const dest_ty = self.typeOfIndex(inst);
@@ -8614,7 +8586,7 @@ pub const FuncGen = struct {
}
fn airTrunc(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try self.resolveInst(ty_op.operand);
const dest_llvm_ty = try o.lowerType(self.typeOfIndex(inst));
@@ -8622,7 +8594,7 @@ pub const FuncGen = struct {
}
fn airFptrunc(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const mod = o.pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try self.resolveInst(ty_op.operand);
@@ -8656,7 +8628,7 @@ pub const FuncGen = struct {
}
fn airFpext(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const mod = o.pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try self.resolveInst(ty_op.operand);
@@ -8696,7 +8668,7 @@ pub const FuncGen = struct {
}
fn airIntFromPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const operand = try self.resolveInst(un_op);
const ptr_ty = self.typeOf(un_op);
@@ -8714,7 +8686,7 @@ pub const FuncGen = struct {
}
fn bitCast(self: *FuncGen, operand: Builder.Value, operand_ty: Type, inst_ty: Type) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const pt = o.pt;
const mod = pt.zcu;
const operand_is_ref = isByRef(operand_ty, pt);
@@ -8739,7 +8711,7 @@ pub const FuncGen = struct {
if (operand_ty.zigTypeTag(mod) == .Vector and inst_ty.zigTypeTag(mod) == .Array) {
const elem_ty = operand_ty.childType(mod);
if (!result_is_ref) {
- return self.dg.todo("implement bitcast vector to non-ref array", .{});
+ return self.ng.todo("implement bitcast vector to non-ref array", .{});
}
const alignment = inst_ty.abiAlignment(pt).toLlvm();
const array_ptr = try self.buildAllocaWorkaround(inst_ty, alignment);
@@ -8766,7 +8738,7 @@ pub const FuncGen = struct {
} else if (operand_ty.zigTypeTag(mod) == .Array and inst_ty.zigTypeTag(mod) == .Vector) {
const elem_ty = operand_ty.childType(mod);
const llvm_vector_ty = try o.lowerType(inst_ty);
- if (!operand_is_ref) return self.dg.todo("implement bitcast non-ref array to vector", .{});
+ if (!operand_is_ref) return self.ng.todo("implement bitcast non-ref array to vector", .{});
const bitcast_ok = elem_ty.bitSize(pt) == elem_ty.abiSize(pt) * 8;
if (bitcast_ok) {
@@ -8831,9 +8803,9 @@ pub const FuncGen = struct {
}
fn airArg(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const pt = o.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const arg_val = self.args[self.arg_index];
self.arg_index += 1;
@@ -8846,9 +8818,8 @@ pub const FuncGen = struct {
const name = self.air.instructions.items(.data)[@intFromEnum(inst)].arg.name;
if (name == .none) return arg_val;
- const func_index = self.dg.decl.getOwnedFunctionIndex();
- const func = mod.funcInfo(func_index);
- const lbrace_line = mod.declPtr(func.owner_decl).navSrcLine(mod) + func.lbrace_line + 1;
+ const func = zcu.funcInfo(zcu.navValue(self.ng.nav_index).toIntern());
+ const lbrace_line = zcu.navSrcLine(func.owner_nav) + func.lbrace_line + 1;
const lbrace_col = func.lbrace_column + 1;
const debug_parameter = try o.builder.debugParameter(
@@ -8870,7 +8841,7 @@ pub const FuncGen = struct {
},
};
- const owner_mod = self.dg.ownerModule();
+ const mod = self.ng.ownerModule();
if (isByRef(inst_ty, pt)) {
_ = try self.wip.callIntrinsic(
.normal,
@@ -8884,7 +8855,7 @@ pub const FuncGen = struct {
},
"",
);
- } else if (owner_mod.optimize_mode == .Debug) {
+ } else if (mod.optimize_mode == .Debug) {
const alignment = inst_ty.abiAlignment(pt).toLlvm();
const alloca = try self.buildAlloca(arg_val.typeOfWip(&self.wip), alignment);
_ = try self.wip.store(.normal, arg_val, alloca, alignment);
@@ -8920,7 +8891,7 @@ pub const FuncGen = struct {
}
fn airAlloc(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const pt = o.pt;
const mod = pt.zcu;
const ptr_ty = self.typeOfIndex(inst);
@@ -8934,7 +8905,7 @@ pub const FuncGen = struct {
}
fn airRetPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const pt = o.pt;
const mod = pt.zcu;
const ptr_ty = self.typeOfIndex(inst);
@@ -8954,7 +8925,7 @@ pub const FuncGen = struct {
llvm_ty: Builder.Type,
alignment: Builder.Alignment,
) Allocator.Error!Builder.Value {
- const target = self.dg.object.pt.zcu.getTarget();
+ const target = self.ng.object.pt.zcu.getTarget();
return buildAllocaInner(&self.wip, llvm_ty, alignment, target);
}
@@ -8964,12 +8935,12 @@ pub const FuncGen = struct {
ty: Type,
alignment: Builder.Alignment,
) Allocator.Error!Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
return self.buildAlloca(try o.builder.arrayType(ty.abiSize(o.pt), .i8), alignment);
}
fn airStore(self: *FuncGen, inst: Air.Inst.Index, safety: bool) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const pt = o.pt;
const mod = pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
@@ -8979,7 +8950,7 @@ pub const FuncGen = struct {
const val_is_undef = if (try self.air.value(bin_op.rhs, pt)) |val| val.isUndefDeep(mod) else false;
if (val_is_undef) {
- const owner_mod = self.dg.ownerModule();
+ const owner_mod = self.ng.ownerModule();
// Even if safety is disabled, we still emit a memset to undefined since it conveys
// extra information to LLVM, and LLVM will optimize it out. Safety makes the difference
@@ -9029,7 +9000,7 @@ pub const FuncGen = struct {
///
/// The first instruction of `body_tail` is the one whose copy we want to elide.
fn canElideLoad(fg: *FuncGen, body_tail: []const Air.Inst.Index) bool {
- const o = fg.dg.object;
+ const o = fg.ng.object;
const mod = o.pt.zcu;
const ip = &mod.intern_pool;
for (body_tail[1..]) |body_inst| {
@@ -9045,7 +9016,7 @@ pub const FuncGen = struct {
}
fn airLoad(fg: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value {
- const o = fg.dg.object;
+ const o = fg.ng.object;
const pt = o.pt;
const mod = pt.zcu;
const inst = body_tail[0];
@@ -9077,7 +9048,7 @@ pub const FuncGen = struct {
fn airRetAddr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
_ = inst;
- const o = self.dg.object;
+ const o = self.ng.object;
const llvm_usize = try o.lowerType(Type.usize);
if (!target_util.supportsReturnAddress(o.pt.zcu.getTarget())) {
// https://github.com/ziglang/zig/issues/11946
@@ -9089,7 +9060,7 @@ pub const FuncGen = struct {
fn airFrameAddress(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
_ = inst;
- const o = self.dg.object;
+ const o = self.ng.object;
const result = try self.wip.callIntrinsic(.normal, .none, .frameaddress, &.{.ptr}, &.{.@"0"}, "");
return self.wip.cast(.ptrtoint, result, try o.lowerType(Type.usize), "");
}
@@ -9106,7 +9077,7 @@ pub const FuncGen = struct {
inst: Air.Inst.Index,
kind: Builder.Function.Instruction.CmpXchg.Kind,
) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const pt = o.pt;
const mod = pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
@@ -9157,7 +9128,7 @@ pub const FuncGen = struct {
}
fn airAtomicRmw(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const pt = o.pt;
const mod = pt.zcu;
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
@@ -9221,7 +9192,7 @@ pub const FuncGen = struct {
}
fn airAtomicLoad(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const pt = o.pt;
const mod = pt.zcu;
const atomic_load = self.air.instructions.items(.data)[@intFromEnum(inst)].atomic_load;
@@ -9269,7 +9240,7 @@ pub const FuncGen = struct {
inst: Air.Inst.Index,
ordering: Builder.AtomicOrdering,
) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const pt = o.pt;
const mod = pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
@@ -9294,7 +9265,7 @@ pub const FuncGen = struct {
}
fn airMemset(self: *FuncGen, inst: Air.Inst.Index, safety: bool) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const pt = o.pt;
const mod = pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
@@ -9329,7 +9300,7 @@ pub const FuncGen = struct {
} else {
_ = try self.wip.callMemSet(dest_ptr, dest_ptr_align, fill_byte, len, access_kind);
}
- const owner_mod = self.dg.ownerModule();
+ const owner_mod = self.ng.ownerModule();
if (safety and owner_mod.valgrind) {
try self.valgrindMarkUndef(dest_ptr, len);
}
@@ -9435,7 +9406,7 @@ pub const FuncGen = struct {
dest_ptr_align: Builder.Alignment,
access_kind: Builder.MemoryAccessKind,
) !void {
- const o = self.dg.object;
+ const o = self.ng.object;
const usize_zero = try o.builder.intValue(try o.lowerType(Type.usize), 0);
const cond = try self.cmp(.normal, .neq, Type.usize, len, usize_zero);
const memset_block = try self.wip.block(1, "MemsetTrapSkip");
@@ -9448,7 +9419,7 @@ pub const FuncGen = struct {
}
fn airMemcpy(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const pt = o.pt;
const mod = pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
@@ -9502,7 +9473,7 @@ pub const FuncGen = struct {
}
fn airSetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const pt = o.pt;
const mod = pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
@@ -9524,7 +9495,7 @@ pub const FuncGen = struct {
}
fn airGetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const pt = o.pt;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const un_ty = self.typeOf(ty_op.operand);
@@ -9563,7 +9534,7 @@ pub const FuncGen = struct {
}
fn airClzCtz(self: *FuncGen, inst: Air.Inst.Index, intrinsic: Builder.Intrinsic) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const inst_ty = self.typeOfIndex(inst);
const operand_ty = self.typeOf(ty_op.operand);
@@ -9581,7 +9552,7 @@ pub const FuncGen = struct {
}
fn airBitOp(self: *FuncGen, inst: Air.Inst.Index, intrinsic: Builder.Intrinsic) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const inst_ty = self.typeOfIndex(inst);
const operand_ty = self.typeOf(ty_op.operand);
@@ -9599,7 +9570,7 @@ pub const FuncGen = struct {
}
fn airByteSwap(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const mod = o.pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand_ty = self.typeOf(ty_op.operand);
@@ -9633,7 +9604,7 @@ pub const FuncGen = struct {
}
fn airErrorSetHasValue(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const mod = o.pt.zcu;
const ip = &mod.intern_pool;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
@@ -9665,7 +9636,7 @@ pub const FuncGen = struct {
}
fn airIsNamedEnumValue(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const operand = try self.resolveInst(un_op);
const enum_ty = self.typeOf(un_op);
@@ -9683,22 +9654,21 @@ pub const FuncGen = struct {
}
fn getIsNamedEnumValueFunction(self: *FuncGen, enum_ty: Type) !Builder.Function.Index {
- const o = self.dg.object;
+ const o = self.ng.object;
const pt = o.pt;
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const enum_type = ip.loadEnumType(enum_ty.toIntern());
// TODO: detect when the type changes and re-emit this function.
- const gop = try o.named_enum_map.getOrPut(o.gpa, enum_type.decl);
+ const gop = try o.named_enum_map.getOrPut(o.gpa, enum_ty.toIntern());
if (gop.found_existing) return gop.value_ptr.*;
- errdefer assert(o.named_enum_map.remove(enum_type.decl));
+ errdefer assert(o.named_enum_map.remove(enum_ty.toIntern()));
- const decl = zcu.declPtr(enum_type.decl);
const target = zcu.root_mod.resolved_target.result;
const function_index = try o.builder.addFunction(
try o.builder.fnType(.i1, &.{try o.lowerType(Type.fromInterned(enum_type.tag_ty))}, .normal),
- try o.builder.strtabStringFmt("__zig_is_named_enum_value_{}", .{decl.fqn.fmt(ip)}),
+ try o.builder.strtabStringFmt("__zig_is_named_enum_value_{}", .{enum_type.name.fmt(ip)}),
toLlvmAddressSpace(.generic, target),
);
@@ -9741,7 +9711,7 @@ pub const FuncGen = struct {
}
fn airTagName(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const operand = try self.resolveInst(un_op);
const enum_ty = self.typeOf(un_op);
@@ -9759,7 +9729,7 @@ pub const FuncGen = struct {
}
fn airErrorName(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const operand = try self.resolveInst(un_op);
const slice_ty = self.typeOfIndex(inst);
@@ -9774,7 +9744,7 @@ pub const FuncGen = struct {
}
fn airSplat(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const scalar = try self.resolveInst(ty_op.operand);
const vector_ty = self.typeOfIndex(inst);
@@ -9792,7 +9762,7 @@ pub const FuncGen = struct {
}
fn airShuffle(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const pt = o.pt;
const mod = pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
@@ -9848,7 +9818,7 @@ pub const FuncGen = struct {
vector_len: usize,
accum_init: Builder.Value,
) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const usize_ty = try o.lowerType(Type.usize);
const llvm_vector_len = try o.builder.intValue(usize_ty, vector_len);
const llvm_result_ty = accum_init.typeOfWip(&self.wip);
@@ -9902,7 +9872,7 @@ pub const FuncGen = struct {
}
fn airReduce(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const mod = o.pt.zcu;
const target = mod.getTarget();
@@ -10012,7 +9982,7 @@ pub const FuncGen = struct {
}
fn airAggregateInit(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const pt = o.pt;
const mod = pt.zcu;
const ip = &mod.intern_pool;
@@ -10133,7 +10103,7 @@ pub const FuncGen = struct {
}
fn airUnionInit(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const pt = o.pt;
const mod = pt.zcu;
const ip = &mod.intern_pool;
@@ -10256,7 +10226,7 @@ pub const FuncGen = struct {
}
fn airPrefetch(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const prefetch = self.air.instructions.items(.data)[@intFromEnum(inst)].prefetch;
comptime assert(@intFromEnum(std.builtin.PrefetchOptions.Rw.read) == 0);
@@ -10306,7 +10276,7 @@ pub const FuncGen = struct {
}
fn airAddrSpaceCast(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const inst_ty = self.typeOfIndex(inst);
const operand = try self.resolveInst(ty_op.operand);
@@ -10324,12 +10294,12 @@ pub const FuncGen = struct {
0 => @field(Builder.Intrinsic, basename ++ ".x"),
1 => @field(Builder.Intrinsic, basename ++ ".y"),
2 => @field(Builder.Intrinsic, basename ++ ".z"),
- else => return self.dg.object.builder.intValue(.i32, default),
+ else => return self.ng.object.builder.intValue(.i32, default),
}, &.{}, &.{}, "");
}
fn airWorkItemId(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const target = o.pt.zcu.getTarget();
assert(target.cpu.arch == .amdgcn); // TODO is to port this function to other GPU architectures
@@ -10339,7 +10309,7 @@ pub const FuncGen = struct {
}
fn airWorkGroupSize(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const target = o.pt.zcu.getTarget();
assert(target.cpu.arch == .amdgcn); // TODO is to port this function to other GPU architectures
@@ -10362,7 +10332,7 @@ pub const FuncGen = struct {
}
fn airWorkGroupId(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const target = o.pt.zcu.getTarget();
assert(target.cpu.arch == .amdgcn); // TODO is to port this function to other GPU architectures
@@ -10372,7 +10342,7 @@ pub const FuncGen = struct {
}
fn getErrorNameTable(self: *FuncGen) Allocator.Error!Builder.Variable.Index {
- const o = self.dg.object;
+ const o = self.ng.object;
const pt = o.pt;
const table = o.error_name_table;
@@ -10401,7 +10371,7 @@ pub const FuncGen = struct {
opt_handle: Builder.Value,
is_by_ref: bool,
) Allocator.Error!Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const field = b: {
if (is_by_ref) {
const field_ptr = try self.wip.gepStruct(opt_llvm_ty, opt_handle, 1, "");
@@ -10422,7 +10392,7 @@ pub const FuncGen = struct {
opt_ty: Type,
can_elide_load: bool,
) !Builder.Value {
- const o = fg.dg.object;
+ const o = fg.ng.object;
const pt = o.pt;
const mod = pt.zcu;
const payload_ty = opt_ty.optionalChild(mod);
@@ -10451,7 +10421,7 @@ pub const FuncGen = struct {
payload: Builder.Value,
non_null_bit: Builder.Value,
) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const pt = o.pt;
const optional_llvm_ty = try o.lowerType(optional_ty);
const non_null_field = try self.wip.cast(.zext, non_null_bit, .i8, "");
@@ -10483,7 +10453,7 @@ pub const FuncGen = struct {
struct_ptr_ty: Type,
field_index: u32,
) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const pt = o.pt;
const mod = pt.zcu;
const struct_ty = struct_ptr_ty.childType(mod);
@@ -10552,7 +10522,7 @@ pub const FuncGen = struct {
// "When loading a value of a type like i20 with a size that is not an integral number of bytes, the result is undefined if the value was not originally written using a store of the same type. "
// => so load the byte aligned value and trunc the unwanted bits.
- const o = fg.dg.object;
+ const o = fg.ng.object;
const pt = o.pt;
const mod = pt.zcu;
const payload_llvm_ty = try o.lowerType(payload_ty);
@@ -10599,7 +10569,7 @@ pub const FuncGen = struct {
ptr_alignment: Builder.Alignment,
access_kind: Builder.MemoryAccessKind,
) !Builder.Value {
- const o = fg.dg.object;
+ const o = fg.ng.object;
const pt = o.pt;
//const pointee_llvm_ty = try o.lowerType(pointee_type);
const result_align = InternPool.Alignment.fromLlvm(ptr_alignment).max(pointee_type.abiAlignment(pt)).toLlvm();
@@ -10620,7 +10590,7 @@ pub const FuncGen = struct {
/// alloca and copies the value into it, then returns the alloca instruction.
/// For isByRef=false types, it creates a load instruction and returns it.
fn load(self: *FuncGen, ptr: Builder.Value, ptr_ty: Type) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const pt = o.pt;
const mod = pt.zcu;
const info = ptr_ty.ptrInfo(mod);
@@ -10693,7 +10663,7 @@ pub const FuncGen = struct {
elem: Builder.Value,
ordering: Builder.AtomicOrdering,
) !void {
- const o = self.dg.object;
+ const o = self.ng.object;
const pt = o.pt;
const mod = pt.zcu;
const info = ptr_ty.ptrInfo(mod);
@@ -10784,7 +10754,7 @@ pub const FuncGen = struct {
fn valgrindMarkUndef(fg: *FuncGen, ptr: Builder.Value, len: Builder.Value) Allocator.Error!void {
const VG_USERREQ__MAKE_MEM_UNDEFINED = 1296236545;
- const o = fg.dg.object;
+ const o = fg.ng.object;
const usize_ty = try o.lowerType(Type.usize);
const zero = try o.builder.intValue(usize_ty, 0);
const req = try o.builder.intValue(usize_ty, VG_USERREQ__MAKE_MEM_UNDEFINED);
@@ -10802,7 +10772,7 @@ pub const FuncGen = struct {
a4: Builder.Value,
a5: Builder.Value,
) Allocator.Error!Builder.Value {
- const o = fg.dg.object;
+ const o = fg.ng.object;
const pt = o.pt;
const mod = pt.zcu;
const target = mod.getTarget();
@@ -10869,13 +10839,13 @@ pub const FuncGen = struct {
}
fn typeOf(fg: *FuncGen, inst: Air.Inst.Ref) Type {
- const o = fg.dg.object;
+ const o = fg.ng.object;
const mod = o.pt.zcu;
return fg.air.typeOf(inst, &mod.intern_pool);
}
fn typeOfIndex(fg: *FuncGen, inst: Air.Inst.Index) Type {
- const o = fg.dg.object;
+ const o = fg.ng.object;
const mod = o.pt.zcu;
return fg.air.typeOfIndex(inst, &mod.intern_pool);
}
diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig
index 23911c4fbf..b13be401ab 100644
--- a/src/codegen/spirv.zig
+++ b/src/codegen/spirv.zig
@@ -31,9 +31,9 @@ const InstMap = std.AutoHashMapUnmanaged(Air.Inst.Index, IdRef);
pub const zig_call_abi_ver = 3;
-const InternMap = std.AutoHashMapUnmanaged(struct { InternPool.Index, DeclGen.Repr }, IdResult);
+const InternMap = std.AutoHashMapUnmanaged(struct { InternPool.Index, NavGen.Repr }, IdResult);
const PtrTypeMap = std.AutoHashMapUnmanaged(
- struct { InternPool.Index, StorageClass, DeclGen.Repr },
+ struct { InternPool.Index, StorageClass, NavGen.Repr },
struct { ty_id: IdRef, fwd_emitted: bool },
);
@@ -142,7 +142,7 @@ const ControlFlow = union(enum) {
};
/// This structure holds information that is relevant to the entire compilation,
-/// in contrast to `DeclGen`, which only holds relevant information about a
+/// in contrast to `NavGen`, which only holds relevant information about a
/// single decl.
pub const Object = struct {
/// A general-purpose allocator that can be used for any allocation for this Object.
@@ -153,10 +153,10 @@ pub const Object = struct {
/// The Zig module that this object file is generated for.
/// A map of Zig decl indices to SPIR-V decl indices.
- decl_link: std.AutoHashMapUnmanaged(InternPool.DeclIndex, SpvModule.Decl.Index) = .{},
+ nav_link: std.AutoHashMapUnmanaged(InternPool.Nav.Index, SpvModule.Decl.Index) = .{},
/// A map of Zig InternPool indices for anonymous decls to SPIR-V decl indices.
- anon_decl_link: std.AutoHashMapUnmanaged(struct { InternPool.Index, StorageClass }, SpvModule.Decl.Index) = .{},
+ uav_link: std.AutoHashMapUnmanaged(struct { InternPool.Index, StorageClass }, SpvModule.Decl.Index) = .{},
/// A map that maps AIR intern pool indices to SPIR-V result-ids.
intern_map: InternMap = .{},
@@ -178,31 +178,29 @@ pub const Object = struct {
pub fn deinit(self: *Object) void {
self.spv.deinit();
- self.decl_link.deinit(self.gpa);
- self.anon_decl_link.deinit(self.gpa);
+ self.nav_link.deinit(self.gpa);
+ self.uav_link.deinit(self.gpa);
self.intern_map.deinit(self.gpa);
self.ptr_types.deinit(self.gpa);
}
- fn genDecl(
+ fn genNav(
self: *Object,
pt: Zcu.PerThread,
- decl_index: InternPool.DeclIndex,
+ nav_index: InternPool.Nav.Index,
air: Air,
liveness: Liveness,
) !void {
const zcu = pt.zcu;
const gpa = zcu.gpa;
- const decl = zcu.declPtr(decl_index);
- const namespace = zcu.namespacePtr(decl.src_namespace);
- const structured_cfg = namespace.fileScope(zcu).mod.structured_cfg;
+ const structured_cfg = zcu.navFileScope(nav_index).mod.structured_cfg;
- var decl_gen = DeclGen{
+ var nav_gen = NavGen{
.gpa = gpa,
.object = self,
.pt = pt,
.spv = &self.spv,
- .decl_index = decl_index,
+ .owner_nav = nav_index,
.air = air,
.liveness = liveness,
.intern_map = &self.intern_map,
@@ -212,18 +210,18 @@ pub const Object = struct {
false => .{ .unstructured = .{} },
},
.current_block_label = undefined,
- .base_line = decl.navSrcLine(zcu),
+ .base_line = zcu.navSrcLine(nav_index),
};
- defer decl_gen.deinit();
+ defer nav_gen.deinit();
- decl_gen.genDecl() catch |err| switch (err) {
+ nav_gen.genNav() catch |err| switch (err) {
error.CodegenFail => {
- try zcu.failed_analysis.put(gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index }), decl_gen.error_msg.?);
+ try zcu.failed_codegen.put(gpa, nav_index, nav_gen.error_msg.?);
},
else => |other| {
// There might be an error that happened *after* self.error_msg
// was already allocated, so be sure to free it.
- if (decl_gen.error_msg) |error_msg| {
+ if (nav_gen.error_msg) |error_msg| {
error_msg.deinit(gpa);
}
@@ -239,31 +237,30 @@ pub const Object = struct {
air: Air,
liveness: Liveness,
) !void {
- const decl_index = pt.zcu.funcInfo(func_index).owner_decl;
+ const nav = pt.zcu.funcInfo(func_index).owner_nav;
// TODO: Separate types for generating decls and functions?
- try self.genDecl(pt, decl_index, air, liveness);
+ try self.genNav(pt, nav, air, liveness);
}
- pub fn updateDecl(
+ pub fn updateNav(
self: *Object,
pt: Zcu.PerThread,
- decl_index: InternPool.DeclIndex,
+ nav: InternPool.Nav.Index,
) !void {
- try self.genDecl(pt, decl_index, undefined, undefined);
+ try self.genNav(pt, nav, undefined, undefined);
}
- /// Fetch or allocate a result id for decl index. This function also marks the decl as alive.
- /// Note: Function does not actually generate the decl, it just allocates an index.
- pub fn resolveDecl(self: *Object, zcu: *Zcu, decl_index: InternPool.DeclIndex) !SpvModule.Decl.Index {
- const decl = zcu.declPtr(decl_index);
- assert(decl.has_tv); // TODO: Do we need to handle a situation where this is false?
-
- const entry = try self.decl_link.getOrPut(self.gpa, decl_index);
+ /// Fetch or allocate a result id for nav index. This function also marks the nav as alive.
+ /// Note: Function does not actually generate the nav, it just allocates an index.
+ pub fn resolveNav(self: *Object, zcu: *Zcu, nav_index: InternPool.Nav.Index) !SpvModule.Decl.Index {
+ const ip = &zcu.intern_pool;
+ const entry = try self.nav_link.getOrPut(self.gpa, nav_index);
if (!entry.found_existing) {
+ const nav = ip.getNav(nav_index);
// TODO: Extern fn?
- const kind: SpvModule.Decl.Kind = if (decl.val.isFuncBody(zcu))
+ const kind: SpvModule.Decl.Kind = if (ip.isFunctionType(nav.typeOf(ip)))
.func
- else switch (decl.@"addrspace") {
+ else switch (nav.status.resolved.@"addrspace") {
.generic => .invocation_global,
else => .global,
};
@@ -276,8 +273,8 @@ pub const Object = struct {
};
/// This structure is used to compile a declaration, and contains all relevant meta-information to deal with that.
-const DeclGen = struct {
- /// A general-purpose allocator that can be used for any allocations for this DeclGen.
+const NavGen = struct {
+ /// A general-purpose allocator that can be used for any allocations for this NavGen.
gpa: Allocator,
/// The object that this decl is generated into.
@@ -291,7 +288,7 @@ const DeclGen = struct {
spv: *SpvModule,
/// The decl we are currently generating code for.
- decl_index: InternPool.DeclIndex,
+ owner_nav: InternPool.Nav.Index,
/// The intermediate code of the declaration we are currently generating. Note: If
/// the declaration is not a function, this value will be undefined!
@@ -399,8 +396,8 @@ const DeclGen = struct {
indirect,
};
- /// Free resources owned by the DeclGen.
- pub fn deinit(self: *DeclGen) void {
+ /// Free resources owned by the NavGen.
+ pub fn deinit(self: *NavGen) void {
self.args.deinit(self.gpa);
self.inst_results.deinit(self.gpa);
self.control_flow.deinit(self.gpa);
@@ -408,26 +405,26 @@ const DeclGen = struct {
}
/// Return the target which we are currently compiling for.
- pub fn getTarget(self: *DeclGen) std.Target {
+ pub fn getTarget(self: *NavGen) std.Target {
return self.pt.zcu.getTarget();
}
- pub fn fail(self: *DeclGen, comptime format: []const u8, args: anytype) Error {
+ pub fn fail(self: *NavGen, comptime format: []const u8, args: anytype) Error {
@setCold(true);
const zcu = self.pt.zcu;
- const src_loc = zcu.declPtr(self.decl_index).navSrcLoc(zcu);
+ const src_loc = zcu.navSrcLoc(self.owner_nav);
assert(self.error_msg == null);
self.error_msg = try Zcu.ErrorMsg.create(zcu.gpa, src_loc, format, args);
return error.CodegenFail;
}
- pub fn todo(self: *DeclGen, comptime format: []const u8, args: anytype) Error {
+ pub fn todo(self: *NavGen, comptime format: []const u8, args: anytype) Error {
return self.fail("TODO (SPIR-V): " ++ format, args);
}
/// This imports the "default" extended instruction set for the target
/// For OpenCL, OpenCL.std.100. For Vulkan, GLSL.std.450.
- fn importExtendedSet(self: *DeclGen) !IdResult {
+ fn importExtendedSet(self: *NavGen) !IdResult {
const target = self.getTarget();
return switch (target.os.tag) {
.opencl => try self.spv.importInstructionSet(.@"OpenCL.std"),
@@ -437,18 +434,18 @@ const DeclGen = struct {
}
/// Fetch the result-id for a previously generated instruction or constant.
- fn resolve(self: *DeclGen, inst: Air.Inst.Ref) !IdRef {
+ fn resolve(self: *NavGen, inst: Air.Inst.Ref) !IdRef {
const pt = self.pt;
const mod = pt.zcu;
if (try self.air.value(inst, pt)) |val| {
const ty = self.typeOf(inst);
if (ty.zigTypeTag(mod) == .Fn) {
- const fn_decl_index = switch (mod.intern_pool.indexToKey(val.ip_index)) {
- .extern_func => |extern_func| extern_func.decl,
- .func => |func| func.owner_decl,
+ const fn_nav = switch (mod.intern_pool.indexToKey(val.ip_index)) {
+ .@"extern" => |@"extern"| @"extern".owner_nav,
+ .func => |func| func.owner_nav,
else => unreachable,
};
- const spv_decl_index = try self.object.resolveDecl(mod, fn_decl_index);
+ const spv_decl_index = try self.object.resolveNav(mod, fn_nav);
try self.func.decl_deps.put(self.spv.gpa, spv_decl_index, {});
return self.spv.declPtr(spv_decl_index).result_id;
}
@@ -459,7 +456,7 @@ const DeclGen = struct {
return self.inst_results.get(index).?; // Assertion means instruction does not dominate usage.
}
- fn resolveAnonDecl(self: *DeclGen, val: InternPool.Index) !IdRef {
+ fn resolveUav(self: *NavGen, val: InternPool.Index) !IdRef {
// TODO: This cannot be a function at this point, but it should probably be handled anyway.
const mod = self.pt.zcu;
@@ -467,7 +464,7 @@ const DeclGen = struct {
const decl_ptr_ty_id = try self.ptrType(ty, .Generic);
const spv_decl_index = blk: {
- const entry = try self.object.anon_decl_link.getOrPut(self.object.gpa, .{ val, .Function });
+ const entry = try self.object.uav_link.getOrPut(self.object.gpa, .{ val, .Function });
if (entry.found_existing) {
try self.addFunctionDep(entry.value_ptr.*, .Function);
@@ -540,7 +537,7 @@ const DeclGen = struct {
return try self.castToGeneric(decl_ptr_ty_id, result_id);
}
- fn addFunctionDep(self: *DeclGen, decl_index: SpvModule.Decl.Index, storage_class: StorageClass) !void {
+ fn addFunctionDep(self: *NavGen, decl_index: SpvModule.Decl.Index, storage_class: StorageClass) !void {
const target = self.getTarget();
if (target.os.tag == .vulkan) {
// Shader entry point dependencies must be variables with Input or Output storage class
@@ -555,7 +552,7 @@ const DeclGen = struct {
}
}
- fn castToGeneric(self: *DeclGen, type_id: IdRef, ptr_id: IdRef) !IdRef {
+ fn castToGeneric(self: *NavGen, type_id: IdRef, ptr_id: IdRef) !IdRef {
const target = self.getTarget();
if (target.os.tag == .vulkan) {
@@ -575,7 +572,7 @@ const DeclGen = struct {
/// block we are currently generating.
/// Note that there is no such thing as nested blocks like in ZIR or AIR, so we don't need to
/// keep track of the previous block.
- fn beginSpvBlock(self: *DeclGen, label: IdResult) !void {
+ fn beginSpvBlock(self: *NavGen, label: IdResult) !void {
try self.func.body.emit(self.spv.gpa, .OpLabel, .{ .id_result = label });
self.current_block_label = label;
}
@@ -590,7 +587,7 @@ const DeclGen = struct {
/// TODO: The extension SPV_INTEL_arbitrary_precision_integers allows any integer size (at least up to 32 bits).
/// TODO: This probably needs an ABI-version as well (especially in combination with SPV_INTEL_arbitrary_precision_integers).
/// TODO: Should the result of this function be cached?
- fn backingIntBits(self: *DeclGen, bits: u16) ?u16 {
+ fn backingIntBits(self: *NavGen, bits: u16) ?u16 {
const target = self.getTarget();
// The backend will never be asked to compiler a 0-bit integer, so we won't have to handle those in this function.
@@ -625,7 +622,7 @@ const DeclGen = struct {
/// In theory that could also be used, but since the spec says that it only guarantees support up to 32-bit ints there
/// is no way of knowing whether those are actually supported.
/// TODO: Maybe this should be cached?
- fn largestSupportedIntBits(self: *DeclGen) u16 {
+ fn largestSupportedIntBits(self: *NavGen) u16 {
const target = self.getTarget();
return if (Target.spirv.featureSetHas(target.cpu.features, .Int64))
64
@@ -636,12 +633,12 @@ const DeclGen = struct {
/// Checks whether the type is "composite int", an integer consisting of multiple native integers. These are represented by
/// arrays of largestSupportedIntBits().
/// Asserts `ty` is an integer.
- fn isCompositeInt(self: *DeclGen, ty: Type) bool {
+ fn isCompositeInt(self: *NavGen, ty: Type) bool {
return self.backingIntBits(ty) == null;
}
/// Checks whether the type can be directly translated to SPIR-V vectors
- fn isSpvVector(self: *DeclGen, ty: Type) bool {
+ fn isSpvVector(self: *NavGen, ty: Type) bool {
const mod = self.pt.zcu;
const target = self.getTarget();
if (ty.zigTypeTag(mod) != .Vector) return false;
@@ -667,7 +664,7 @@ const DeclGen = struct {
return is_scalar and (spirv_len or opencl_len);
}
- fn arithmeticTypeInfo(self: *DeclGen, ty: Type) ArithmeticTypeInfo {
+ fn arithmeticTypeInfo(self: *NavGen, ty: Type) ArithmeticTypeInfo {
const mod = self.pt.zcu;
const target = self.getTarget();
var scalar_ty = ty.scalarType(mod);
@@ -715,7 +712,7 @@ const DeclGen = struct {
}
/// Emits a bool constant in a particular representation.
- fn constBool(self: *DeclGen, value: bool, repr: Repr) !IdRef {
+ fn constBool(self: *NavGen, value: bool, repr: Repr) !IdRef {
// TODO: Cache?
const section = &self.spv.sections.types_globals_constants;
@@ -742,7 +739,7 @@ const DeclGen = struct {
/// Emits an integer constant.
/// This function, unlike SpvModule.constInt, takes care to bitcast
/// the value to an unsigned int first for Kernels.
- fn constInt(self: *DeclGen, ty: Type, value: anytype, repr: Repr) !IdRef {
+ fn constInt(self: *NavGen, ty: Type, value: anytype, repr: Repr) !IdRef {
// TODO: Cache?
const mod = self.pt.zcu;
const scalar_ty = ty.scalarType(mod);
@@ -809,7 +806,7 @@ const DeclGen = struct {
/// ty must be a struct type.
/// Constituents should be in `indirect` representation (as the elements of a struct should be).
/// Result is in `direct` representation.
- fn constructStruct(self: *DeclGen, ty: Type, types: []const Type, constituents: []const IdRef) !IdRef {
+ fn constructStruct(self: *NavGen, ty: Type, types: []const Type, constituents: []const IdRef) !IdRef {
assert(types.len == constituents.len);
const result_id = self.spv.allocId();
@@ -823,7 +820,7 @@ const DeclGen = struct {
/// Construct a vector at runtime.
/// ty must be an vector type.
- fn constructVector(self: *DeclGen, ty: Type, constituents: []const IdRef) !IdRef {
+ fn constructVector(self: *NavGen, ty: Type, constituents: []const IdRef) !IdRef {
const mod = self.pt.zcu;
assert(ty.vectorLen(mod) == constituents.len);
@@ -847,7 +844,7 @@ const DeclGen = struct {
/// Construct a vector at runtime with all lanes set to the same value.
/// ty must be an vector type.
- fn constructVectorSplat(self: *DeclGen, ty: Type, constituent: IdRef) !IdRef {
+ fn constructVectorSplat(self: *NavGen, ty: Type, constituent: IdRef) !IdRef {
const mod = self.pt.zcu;
const n = ty.vectorLen(mod);
@@ -862,7 +859,7 @@ const DeclGen = struct {
/// ty must be an array type.
/// Constituents should be in `indirect` representation (as the elements of an array should be).
/// Result is in `direct` representation.
- fn constructArray(self: *DeclGen, ty: Type, constituents: []const IdRef) !IdRef {
+ fn constructArray(self: *NavGen, ty: Type, constituents: []const IdRef) !IdRef {
const result_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpCompositeConstruct, .{
.id_result_type = try self.resolveType(ty, .direct),
@@ -878,7 +875,7 @@ const DeclGen = struct {
/// is done by emitting a sequence of instructions that initialize the value.
//
/// This function should only be called during function code generation.
- fn constant(self: *DeclGen, ty: Type, val: Value, repr: Repr) !IdRef {
+ fn constant(self: *NavGen, ty: Type, val: Value, repr: Repr) !IdRef {
// Note: Using intern_map can only be used with constants that DO NOT generate any runtime code!!
// Ideally that should be all constants in the future, or it should be cleaned up somehow. For
// now, only use the intern_map on case-by-case basis by breaking to :cache.
@@ -922,7 +919,7 @@ const DeclGen = struct {
.undef => unreachable, // handled above
.variable,
- .extern_func,
+ .@"extern",
.func,
.enum_literal,
.empty_enum_value,
@@ -1142,7 +1139,7 @@ const DeclGen = struct {
return cacheable_id;
}
- fn constantPtr(self: *DeclGen, ptr_val: Value) Error!IdRef {
+ fn constantPtr(self: *NavGen, ptr_val: Value) Error!IdRef {
// TODO: Caching??
const pt = self.pt;
@@ -1160,7 +1157,7 @@ const DeclGen = struct {
return self.derivePtr(derivation);
}
- fn derivePtr(self: *DeclGen, derivation: Value.PointerDeriveStep) Error!IdRef {
+ fn derivePtr(self: *NavGen, derivation: Value.PointerDeriveStep) Error!IdRef {
const pt = self.pt;
const zcu = pt.zcu;
switch (derivation) {
@@ -1178,13 +1175,13 @@ const DeclGen = struct {
});
return result_ptr_id;
},
- .decl_ptr => |decl| {
- const result_ptr_ty = try zcu.declPtr(decl).declPtrType(pt);
- return self.constantDeclRef(result_ptr_ty, decl);
+ .nav_ptr => |nav| {
+ const result_ptr_ty = try pt.navPtrType(nav);
+ return self.constantNavRef(result_ptr_ty, nav);
},
- .anon_decl_ptr => |ad| {
- const result_ptr_ty = Type.fromInterned(ad.orig_ty);
- return self.constantAnonDeclRef(result_ptr_ty, ad);
+ .uav_ptr => |uav| {
+ const result_ptr_ty = Type.fromInterned(uav.orig_ty);
+ return self.constantUavRef(result_ptr_ty, uav);
},
.eu_payload_ptr => @panic("TODO"),
.opt_payload_ptr => @panic("TODO"),
@@ -1227,10 +1224,10 @@ const DeclGen = struct {
}
}
- fn constantAnonDeclRef(
- self: *DeclGen,
+ fn constantUavRef(
+ self: *NavGen,
ty: Type,
- anon_decl: InternPool.Key.Ptr.BaseAddr.AnonDecl,
+ uav: InternPool.Key.Ptr.BaseAddr.Uav,
) !IdRef {
// TODO: Merge this function with constantDeclRef.
@@ -1238,31 +1235,24 @@ const DeclGen = struct {
const mod = pt.zcu;
const ip = &mod.intern_pool;
const ty_id = try self.resolveType(ty, .direct);
- const decl_val = anon_decl.val;
- const decl_ty = Type.fromInterned(ip.typeOf(decl_val));
+ const uav_ty = Type.fromInterned(ip.typeOf(uav.val));
- if (Value.fromInterned(decl_val).getFunction(mod)) |func| {
- _ = func;
- unreachable; // TODO
- } else if (Value.fromInterned(decl_val).getExternFunc(mod)) |func| {
- _ = func;
- unreachable;
+ switch (ip.indexToKey(uav.val)) {
+ .func => unreachable, // TODO
+ .@"extern" => assert(!ip.isFunctionType(uav_ty.toIntern())),
+ else => {},
}
// const is_fn_body = decl_ty.zigTypeTag(mod) == .Fn;
- if (!decl_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) {
- // Pointer to nothing - return undefoined
+ if (!uav_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) {
+ // Pointer to nothing - return undefined
return self.spv.constUndef(ty_id);
}
- if (decl_ty.zigTypeTag(mod) == .Fn) {
- unreachable; // TODO
- }
-
- // Anon decl refs are always generic.
+ // Uav refs are always generic.
assert(ty.ptrAddressSpace(mod) == .generic);
- const decl_ptr_ty_id = try self.ptrType(decl_ty, .Generic);
- const ptr_id = try self.resolveAnonDecl(decl_val);
+ const decl_ptr_ty_id = try self.ptrType(uav_ty, .Generic);
+ const ptr_id = try self.resolveUav(uav.val);
if (decl_ptr_ty_id != ty_id) {
// Differing pointer types, insert a cast.
@@ -1278,28 +1268,31 @@ const DeclGen = struct {
}
}
- fn constantDeclRef(self: *DeclGen, ty: Type, decl_index: InternPool.DeclIndex) !IdRef {
+ fn constantNavRef(self: *NavGen, ty: Type, nav_index: InternPool.Nav.Index) !IdRef {
const pt = self.pt;
const mod = pt.zcu;
+ const ip = &mod.intern_pool;
const ty_id = try self.resolveType(ty, .direct);
- const decl = mod.declPtr(decl_index);
+ const nav = ip.getNav(nav_index);
+ const nav_val = mod.navValue(nav_index);
+ const nav_ty = nav_val.typeOf(mod);
- switch (mod.intern_pool.indexToKey(decl.val.ip_index)) {
+ switch (ip.indexToKey(nav_val.toIntern())) {
.func => {
// TODO: Properly lower function pointers. For now we are going to hack around it and
// just generate an empty pointer. Function pointers are represented by a pointer to usize.
return try self.spv.constUndef(ty_id);
},
- .extern_func => unreachable, // TODO
+ .@"extern" => assert(!ip.isFunctionType(nav_ty.toIntern())), // TODO
else => {},
}
- if (!decl.typeOf(mod).isFnOrHasRuntimeBitsIgnoreComptime(pt)) {
+ if (!nav_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) {
// Pointer to nothing - return undefined.
return self.spv.constUndef(ty_id);
}
- const spv_decl_index = try self.object.resolveDecl(mod, decl_index);
+ const spv_decl_index = try self.object.resolveNav(mod, nav_index);
const spv_decl = self.spv.declPtr(spv_decl_index);
const decl_id = switch (spv_decl.kind) {
@@ -1307,10 +1300,10 @@ const DeclGen = struct {
.global, .invocation_global => spv_decl.result_id,
};
- const final_storage_class = self.spvStorageClass(decl.@"addrspace");
+ const final_storage_class = self.spvStorageClass(nav.status.resolved.@"addrspace");
try self.addFunctionDep(spv_decl_index, final_storage_class);
- const decl_ptr_ty_id = try self.ptrType(decl.typeOf(mod), final_storage_class);
+ const decl_ptr_ty_id = try self.ptrType(nav_ty, final_storage_class);
const ptr_id = switch (final_storage_class) {
.Generic => try self.castToGeneric(decl_ptr_ty_id, decl_id),
@@ -1332,7 +1325,7 @@ const DeclGen = struct {
}
// Turn a Zig type's name into a cache reference.
- fn resolveTypeName(self: *DeclGen, ty: Type) ![]const u8 {
+ fn resolveTypeName(self: *NavGen, ty: Type) ![]const u8 {
var name = std.ArrayList(u8).init(self.gpa);
defer name.deinit();
try ty.print(name.writer(), self.pt);
@@ -1343,7 +1336,7 @@ const DeclGen = struct {
/// The integer type that is returned by this function is the type that is used to perform
/// actual operations (as well as store) a Zig type of a particular number of bits. To create
/// a type with an exact size, use SpvModule.intType.
- fn intType(self: *DeclGen, signedness: std.builtin.Signedness, bits: u16) !IdRef {
+ fn intType(self: *NavGen, signedness: std.builtin.Signedness, bits: u16) !IdRef {
const backing_bits = self.backingIntBits(bits) orelse {
// TODO: Integers too big for any native type are represented as "composite integers":
// An array of largestSupportedIntBits.
@@ -1358,7 +1351,7 @@ const DeclGen = struct {
return self.spv.intType(.unsigned, backing_bits);
}
- fn arrayType(self: *DeclGen, len: u32, child_ty: IdRef) !IdRef {
+ fn arrayType(self: *NavGen, len: u32, child_ty: IdRef) !IdRef {
// TODO: Cache??
const len_id = try self.constInt(Type.u32, len, .direct);
const result_id = self.spv.allocId();
@@ -1371,11 +1364,11 @@ const DeclGen = struct {
return result_id;
}
- fn ptrType(self: *DeclGen, child_ty: Type, storage_class: StorageClass) !IdRef {
+ fn ptrType(self: *NavGen, child_ty: Type, storage_class: StorageClass) !IdRef {
return try self.ptrType2(child_ty, storage_class, .indirect);
}
- fn ptrType2(self: *DeclGen, child_ty: Type, storage_class: StorageClass, child_repr: Repr) !IdRef {
+ fn ptrType2(self: *NavGen, child_ty: Type, storage_class: StorageClass, child_repr: Repr) !IdRef {
const key = .{ child_ty.toIntern(), storage_class, child_repr };
const entry = try self.ptr_types.getOrPut(self.gpa, key);
if (entry.found_existing) {
@@ -1407,7 +1400,7 @@ const DeclGen = struct {
return result_id;
}
- fn functionType(self: *DeclGen, return_ty: Type, param_types: []const Type) !IdRef {
+ fn functionType(self: *NavGen, return_ty: Type, param_types: []const Type) !IdRef {
// TODO: Cache??
const param_ids = try self.gpa.alloc(IdRef, param_types.len);
@@ -1427,7 +1420,7 @@ const DeclGen = struct {
return ty_id;
}
- fn zigScalarOrVectorTypeLike(self: *DeclGen, new_ty: Type, base_ty: Type) !Type {
+ fn zigScalarOrVectorTypeLike(self: *NavGen, new_ty: Type, base_ty: Type) !Type {
const pt = self.pt;
const new_scalar_ty = new_ty.scalarType(pt.zcu);
if (!base_ty.isVector(pt.zcu)) {
@@ -1458,7 +1451,7 @@ const DeclGen = struct {
/// padding: [padding_size]u8,
/// }
/// If any of the fields' size is 0, it will be omitted.
- fn resolveUnionType(self: *DeclGen, ty: Type) !IdRef {
+ fn resolveUnionType(self: *NavGen, ty: Type) !IdRef {
const mod = self.pt.zcu;
const ip = &mod.intern_pool;
const union_obj = mod.typeToUnion(ty).?;
@@ -1509,7 +1502,7 @@ const DeclGen = struct {
return result_id;
}
- fn resolveFnReturnType(self: *DeclGen, ret_ty: Type) !IdRef {
+ fn resolveFnReturnType(self: *NavGen, ret_ty: Type) !IdRef {
const pt = self.pt;
if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt)) {
// If the return type is an error set or an error union, then we make this
@@ -1526,7 +1519,7 @@ const DeclGen = struct {
}
/// Turn a Zig type into a SPIR-V Type, and return a reference to it.
- fn resolveType(self: *DeclGen, ty: Type, repr: Repr) Error!IdRef {
+ fn resolveType(self: *NavGen, ty: Type, repr: Repr) Error!IdRef {
if (self.intern_map.get(.{ ty.toIntern(), repr })) |id| {
return id;
}
@@ -1536,7 +1529,7 @@ const DeclGen = struct {
return id;
}
- fn resolveTypeInner(self: *DeclGen, ty: Type, repr: Repr) Error!IdRef {
+ fn resolveTypeInner(self: *NavGen, ty: Type, repr: Repr) Error!IdRef {
const pt = self.pt;
const mod = pt.zcu;
const ip = &mod.intern_pool;
@@ -1839,7 +1832,7 @@ const DeclGen = struct {
}
}
- fn spvStorageClass(self: *DeclGen, as: std.builtin.AddressSpace) StorageClass {
+ fn spvStorageClass(self: *NavGen, as: std.builtin.AddressSpace) StorageClass {
const target = self.getTarget();
return switch (as) {
.generic => switch (target.os.tag) {
@@ -1882,7 +1875,7 @@ const DeclGen = struct {
}
};
- fn errorUnionLayout(self: *DeclGen, payload_ty: Type) ErrorUnionLayout {
+ fn errorUnionLayout(self: *NavGen, payload_ty: Type) ErrorUnionLayout {
const pt = self.pt;
const error_align = Type.anyerror.abiAlignment(pt);
@@ -1913,7 +1906,7 @@ const DeclGen = struct {
total_fields: u32,
};
- fn unionLayout(self: *DeclGen, ty: Type) UnionLayout {
+ fn unionLayout(self: *NavGen, ty: Type) UnionLayout {
const pt = self.pt;
const mod = pt.zcu;
const ip = &mod.intern_pool;
@@ -2004,25 +1997,25 @@ const DeclGen = struct {
return .{ .ty = ty, .value = .{ .singleton = singleton } };
}
- fn materialize(self: Temporary, dg: *DeclGen) !IdResult {
- const mod = dg.pt.zcu;
+ fn materialize(self: Temporary, ng: *NavGen) !IdResult {
+ const mod = ng.pt.zcu;
switch (self.value) {
.singleton => |id| return id,
.exploded_vector => |range| {
assert(self.ty.isVector(mod));
assert(self.ty.vectorLen(mod) == range.len);
- const consituents = try dg.gpa.alloc(IdRef, range.len);
- defer dg.gpa.free(consituents);
+ const consituents = try ng.gpa.alloc(IdRef, range.len);
+ defer ng.gpa.free(consituents);
for (consituents, 0..range.len) |*id, i| {
id.* = range.at(i);
}
- return dg.constructVector(self.ty, consituents);
+ return ng.constructVector(self.ty, consituents);
},
}
}
- fn vectorization(self: Temporary, dg: *DeclGen) Vectorization {
- return Vectorization.fromType(self.ty, dg);
+ fn vectorization(self: Temporary, ng: *NavGen) Vectorization {
+ return Vectorization.fromType(self.ty, ng);
}
fn pun(self: Temporary, new_ty: Type) Temporary {
@@ -2034,8 +2027,8 @@ const DeclGen = struct {
/// 'Explode' a temporary into separate elements. This turns a vector
/// into a bag of elements.
- fn explode(self: Temporary, dg: *DeclGen) !IdRange {
- const mod = dg.pt.zcu;
+ fn explode(self: Temporary, ng: *NavGen) !IdRange {
+ const mod = ng.pt.zcu;
// If the value is a scalar, then this is a no-op.
if (!self.ty.isVector(mod)) {
@@ -2045,9 +2038,9 @@ const DeclGen = struct {
};
}
- const ty_id = try dg.resolveType(self.ty.scalarType(mod), .direct);
+ const ty_id = try ng.resolveType(self.ty.scalarType(mod), .direct);
const n = self.ty.vectorLen(mod);
- const results = dg.spv.allocIds(n);
+ const results = ng.spv.allocIds(n);
const id = switch (self.value) {
.singleton => |id| id,
@@ -2056,7 +2049,7 @@ const DeclGen = struct {
for (0..n) |i| {
const indexes = [_]u32{@intCast(i)};
- try dg.func.body.emit(dg.spv.gpa, .OpCompositeExtract, .{
+ try ng.func.body.emit(ng.spv.gpa, .OpCompositeExtract, .{
.id_result_type = ty_id,
.id_result = results.at(i),
.composite = id,
@@ -2069,7 +2062,7 @@ const DeclGen = struct {
};
/// Initialize a `Temporary` from an AIR value.
- fn temporary(self: *DeclGen, inst: Air.Inst.Ref) !Temporary {
+ fn temporary(self: *NavGen, inst: Air.Inst.Ref) !Temporary {
return .{
.ty = self.typeOf(inst),
.value = .{ .singleton = try self.resolve(inst) },
@@ -2093,11 +2086,11 @@ const DeclGen = struct {
/// Derive a vectorization from a particular type. This usually
/// only checks the size, but the source-of-truth is implemented
/// by `isSpvVector()`.
- fn fromType(ty: Type, dg: *DeclGen) Vectorization {
- const mod = dg.pt.zcu;
+ fn fromType(ty: Type, ng: *NavGen) Vectorization {
+ const mod = ng.pt.zcu;
if (!ty.isVector(mod)) {
return .scalar;
- } else if (dg.isSpvVector(ty)) {
+ } else if (ng.isSpvVector(ty)) {
return .{ .spv_vectorized = ty.vectorLen(mod) };
} else {
return .{ .unrolled = ty.vectorLen(mod) };
@@ -2169,8 +2162,8 @@ const DeclGen = struct {
/// Turns `ty` into the result-type of an individual vector operation.
/// `ty` may be a scalar or vector, it doesn't matter.
- fn operationType(self: Vectorization, dg: *DeclGen, ty: Type) !Type {
- const pt = dg.pt;
+ fn operationType(self: Vectorization, ng: *NavGen, ty: Type) !Type {
+ const pt = ng.pt;
const scalar_ty = ty.scalarType(pt.zcu);
return switch (self) {
.scalar, .unrolled => scalar_ty,
@@ -2183,8 +2176,8 @@ const DeclGen = struct {
/// Turns `ty` into the result-type of the entire operation.
/// `ty` may be a scalar or vector, it doesn't matter.
- fn resultType(self: Vectorization, dg: *DeclGen, ty: Type) !Type {
- const pt = dg.pt;
+ fn resultType(self: Vectorization, ng: *NavGen, ty: Type) !Type {
+ const pt = ng.pt;
const scalar_ty = ty.scalarType(pt.zcu);
return switch (self) {
.scalar => scalar_ty,
@@ -2198,10 +2191,10 @@ const DeclGen = struct {
/// Before a temporary can be used, some setup may need to be one. This function implements
/// this setup, and returns a new type that holds the relevant information on how to access
/// elements of the input.
- fn prepare(self: Vectorization, dg: *DeclGen, tmp: Temporary) !PreparedOperand {
- const pt = dg.pt;
+ fn prepare(self: Vectorization, ng: *NavGen, tmp: Temporary) !PreparedOperand {
+ const pt = ng.pt;
const is_vector = tmp.ty.isVector(pt.zcu);
- const is_spv_vector = dg.isSpvVector(tmp.ty);
+ const is_spv_vector = ng.isSpvVector(tmp.ty);
const value: PreparedOperand.Value = switch (tmp.value) {
.singleton => |id| switch (self) {
.scalar => blk: {
@@ -2220,7 +2213,7 @@ const DeclGen = struct {
.child = tmp.ty.toIntern(),
});
- const vector = try dg.constructVectorSplat(vector_ty, id);
+ const vector = try ng.constructVectorSplat(vector_ty, id);
return .{
.ty = vector_ty,
.value = .{ .spv_vectorwise = vector },
@@ -2228,7 +2221,7 @@ const DeclGen = struct {
},
.unrolled => blk: {
if (is_vector) {
- break :blk .{ .vector_exploded = try tmp.explode(dg) };
+ break :blk .{ .vector_exploded = try tmp.explode(ng) };
} else {
break :blk .{ .scalar_broadcast = id };
}
@@ -2243,7 +2236,7 @@ const DeclGen = struct {
// a type that cannot do that.
assert(is_spv_vector);
assert(range.len == n);
- const vec = try tmp.materialize(dg);
+ const vec = try tmp.materialize(ng);
break :blk .{ .spv_vectorwise = vec };
},
.unrolled => |n| blk: {
@@ -2324,7 +2317,7 @@ const DeclGen = struct {
/// - A `Vectorization` instance
/// - A Type, in which case the vectorization is computed via `Vectorization.fromType`.
/// - A Temporary, in which case the vectorization is computed via `Temporary.vectorization`.
- fn vectorization(self: *DeclGen, args: anytype) Vectorization {
+ fn vectorization(self: *NavGen, args: anytype) Vectorization {
var v: Vectorization = undefined;
assert(args.len >= 1);
inline for (args, 0..) |arg, i| {
@@ -2345,7 +2338,7 @@ const DeclGen = struct {
/// This function builds an OpSConvert of OpUConvert depending on the
/// signedness of the types.
- fn buildIntConvert(self: *DeclGen, dst_ty: Type, src: Temporary) !Temporary {
+ fn buildIntConvert(self: *NavGen, dst_ty: Type, src: Temporary) !Temporary {
const mod = self.pt.zcu;
const dst_ty_id = try self.resolveType(dst_ty.scalarType(mod), .direct);
@@ -2384,7 +2377,7 @@ const DeclGen = struct {
return v.finalize(result_ty, results);
}
- fn buildFma(self: *DeclGen, a: Temporary, b: Temporary, c: Temporary) !Temporary {
+ fn buildFma(self: *NavGen, a: Temporary, b: Temporary, c: Temporary) !Temporary {
const target = self.getTarget();
const v = self.vectorization(.{ a, b, c });
@@ -2424,7 +2417,7 @@ const DeclGen = struct {
return v.finalize(result_ty, results);
}
- fn buildSelect(self: *DeclGen, condition: Temporary, lhs: Temporary, rhs: Temporary) !Temporary {
+ fn buildSelect(self: *NavGen, condition: Temporary, lhs: Temporary, rhs: Temporary) !Temporary {
const mod = self.pt.zcu;
const v = self.vectorization(.{ condition, lhs, rhs });
@@ -2475,7 +2468,7 @@ const DeclGen = struct {
f_oge,
};
- fn buildCmp(self: *DeclGen, pred: CmpPredicate, lhs: Temporary, rhs: Temporary) !Temporary {
+ fn buildCmp(self: *NavGen, pred: CmpPredicate, lhs: Temporary, rhs: Temporary) !Temporary {
const v = self.vectorization(.{ lhs, rhs });
const ops = v.operations();
const results = self.spv.allocIds(ops);
@@ -2543,7 +2536,7 @@ const DeclGen = struct {
log10,
};
- fn buildUnary(self: *DeclGen, op: UnaryOp, operand: Temporary) !Temporary {
+ fn buildUnary(self: *NavGen, op: UnaryOp, operand: Temporary) !Temporary {
const target = self.getTarget();
const v = blk: {
const v = self.vectorization(.{operand});
@@ -2673,7 +2666,7 @@ const DeclGen = struct {
l_or,
};
- fn buildBinary(self: *DeclGen, op: BinaryOp, lhs: Temporary, rhs: Temporary) !Temporary {
+ fn buildBinary(self: *NavGen, op: BinaryOp, lhs: Temporary, rhs: Temporary) !Temporary {
const target = self.getTarget();
const v = self.vectorization(.{ lhs, rhs });
@@ -2762,7 +2755,7 @@ const DeclGen = struct {
/// This function builds an extended multiplication, either OpSMulExtended or OpUMulExtended on Vulkan,
/// or OpIMul and s_mul_hi or u_mul_hi on OpenCL.
fn buildWideMul(
- self: *DeclGen,
+ self: *NavGen,
op: enum {
s_mul_extended,
u_mul_extended,
@@ -2893,7 +2886,7 @@ const DeclGen = struct {
/// OpFunctionEnd
/// TODO is to also write out the error as a function call parameter, and to somehow fetch
/// the name of an error in the text executor.
- fn generateTestEntryPoint(self: *DeclGen, name: []const u8, spv_test_decl_index: SpvModule.Decl.Index) !void {
+ fn generateTestEntryPoint(self: *NavGen, name: []const u8, spv_test_decl_index: SpvModule.Decl.Index) !void {
const anyerror_ty_id = try self.resolveType(Type.anyerror, .direct);
const ptr_anyerror_ty = try self.pt.ptrType(.{
.child = Type.anyerror.toIntern(),
@@ -2946,21 +2939,22 @@ const DeclGen = struct {
try self.spv.declareEntryPoint(spv_decl_index, test_name, .Kernel);
}
- fn genDecl(self: *DeclGen) !void {
+ fn genNav(self: *NavGen) !void {
const pt = self.pt;
const mod = pt.zcu;
const ip = &mod.intern_pool;
- const decl = mod.declPtr(self.decl_index);
- const spv_decl_index = try self.object.resolveDecl(mod, self.decl_index);
+ const spv_decl_index = try self.object.resolveNav(mod, self.owner_nav);
const result_id = self.spv.declPtr(spv_decl_index).result_id;
+ const nav = ip.getNav(self.owner_nav);
+ const val = mod.navValue(self.owner_nav);
+ const ty = val.typeOf(mod);
switch (self.spv.declPtr(spv_decl_index).kind) {
.func => {
- assert(decl.typeOf(mod).zigTypeTag(mod) == .Fn);
- const fn_info = mod.typeToFunc(decl.typeOf(mod)).?;
+ const fn_info = mod.typeToFunc(ty).?;
const return_ty_id = try self.resolveFnReturnType(Type.fromInterned(fn_info.return_type));
- const prototype_ty_id = try self.resolveType(decl.typeOf(mod), .direct);
+ const prototype_ty_id = try self.resolveType(ty, .direct);
try self.func.prologue.emit(self.spv.gpa, .OpFunction, .{
.id_result_type = return_ty_id,
.id_result = result_id,
@@ -3012,27 +3006,26 @@ const DeclGen = struct {
// Append the actual code into the functions section.
try self.spv.addFunction(spv_decl_index, self.func);
- try self.spv.debugName(result_id, decl.fqn.toSlice(ip));
+ try self.spv.debugName(result_id, nav.fqn.toSlice(ip));
// Temporarily generate a test kernel declaration if this is a test function.
- if (self.pt.zcu.test_functions.contains(self.decl_index)) {
- try self.generateTestEntryPoint(decl.fqn.toSlice(ip), spv_decl_index);
+ if (self.pt.zcu.test_functions.contains(self.owner_nav)) {
+ try self.generateTestEntryPoint(nav.fqn.toSlice(ip), spv_decl_index);
}
},
.global => {
- const maybe_init_val: ?Value = blk: {
- if (decl.val.getVariable(mod)) |payload| {
- if (payload.is_extern) break :blk null;
- break :blk Value.fromInterned(payload.init);
- }
- break :blk decl.val;
+ const maybe_init_val: ?Value = switch (ip.indexToKey(val.toIntern())) {
+ .func => unreachable,
+ .variable => |variable| Value.fromInterned(variable.init),
+ .@"extern" => null,
+ else => val,
};
assert(maybe_init_val == null); // TODO
- const final_storage_class = self.spvStorageClass(decl.@"addrspace");
+ const final_storage_class = self.spvStorageClass(nav.status.resolved.@"addrspace");
assert(final_storage_class != .Generic); // These should be instance globals
- const ptr_ty_id = try self.ptrType(decl.typeOf(mod), final_storage_class);
+ const ptr_ty_id = try self.ptrType(ty, final_storage_class);
try self.spv.sections.types_globals_constants.emit(self.spv.gpa, .OpVariable, .{
.id_result_type = ptr_ty_id,
@@ -3040,21 +3033,20 @@ const DeclGen = struct {
.storage_class = final_storage_class,
});
- try self.spv.debugName(result_id, decl.fqn.toSlice(ip));
+ try self.spv.debugName(result_id, nav.fqn.toSlice(ip));
try self.spv.declareDeclDeps(spv_decl_index, &.{});
},
.invocation_global => {
- const maybe_init_val: ?Value = blk: {
- if (decl.val.getVariable(mod)) |payload| {
- if (payload.is_extern) break :blk null;
- break :blk Value.fromInterned(payload.init);
- }
- break :blk decl.val;
+ const maybe_init_val: ?Value = switch (ip.indexToKey(val.toIntern())) {
+ .func => unreachable,
+ .variable => |variable| Value.fromInterned(variable.init),
+ .@"extern" => null,
+ else => val,
};
try self.spv.declareDeclDeps(spv_decl_index, &.{});
- const ptr_ty_id = try self.ptrType(decl.typeOf(mod), .Function);
+ const ptr_ty_id = try self.ptrType(ty, .Function);
if (maybe_init_val) |init_val| {
// TODO: Combine with resolveAnonDecl?
@@ -3074,7 +3066,7 @@ const DeclGen = struct {
});
self.current_block_label = root_block_id;
- const val_id = try self.constant(decl.typeOf(mod), init_val, .indirect);
+ const val_id = try self.constant(ty, init_val, .indirect);
try self.func.body.emit(self.spv.gpa, .OpStore, .{
.pointer = result_id,
.object = val_id,
@@ -3084,7 +3076,7 @@ const DeclGen = struct {
try self.func.body.emit(self.spv.gpa, .OpFunctionEnd, {});
try self.spv.addFunction(spv_decl_index, self.func);
- try self.spv.debugNameFmt(initializer_id, "initializer of {}", .{decl.fqn.fmt(ip)});
+ try self.spv.debugNameFmt(initializer_id, "initializer of {}", .{nav.fqn.fmt(ip)});
try self.spv.sections.types_globals_constants.emit(self.spv.gpa, .OpExtInst, .{
.id_result_type = ptr_ty_id,
@@ -3106,11 +3098,11 @@ const DeclGen = struct {
}
}
- fn intFromBool(self: *DeclGen, value: Temporary) !Temporary {
+ fn intFromBool(self: *NavGen, value: Temporary) !Temporary {
return try self.intFromBool2(value, Type.u1);
}
- fn intFromBool2(self: *DeclGen, value: Temporary, result_ty: Type) !Temporary {
+ fn intFromBool2(self: *NavGen, value: Temporary, result_ty: Type) !Temporary {
const zero_id = try self.constInt(result_ty, 0, .direct);
const one_id = try self.constInt(result_ty, 1, .direct);
@@ -3123,7 +3115,7 @@ const DeclGen = struct {
/// Convert representation from indirect (in memory) to direct (in 'register')
/// This converts the argument type from resolveType(ty, .indirect) to resolveType(ty, .direct).
- fn convertToDirect(self: *DeclGen, ty: Type, operand_id: IdRef) !IdRef {
+ fn convertToDirect(self: *NavGen, ty: Type, operand_id: IdRef) !IdRef {
const mod = self.pt.zcu;
switch (ty.scalarType(mod).zigTypeTag(mod)) {
.Bool => {
@@ -3149,7 +3141,7 @@ const DeclGen = struct {
/// Convert representation from direct (in 'register) to direct (in memory)
/// This converts the argument type from resolveType(ty, .direct) to resolveType(ty, .indirect).
- fn convertToIndirect(self: *DeclGen, ty: Type, operand_id: IdRef) !IdRef {
+ fn convertToIndirect(self: *NavGen, ty: Type, operand_id: IdRef) !IdRef {
const mod = self.pt.zcu;
switch (ty.scalarType(mod).zigTypeTag(mod)) {
.Bool => {
@@ -3160,7 +3152,7 @@ const DeclGen = struct {
}
}
- fn extractField(self: *DeclGen, result_ty: Type, object: IdRef, field: u32) !IdRef {
+ fn extractField(self: *NavGen, result_ty: Type, object: IdRef, field: u32) !IdRef {
const result_ty_id = try self.resolveType(result_ty, .indirect);
const result_id = self.spv.allocId();
const indexes = [_]u32{field};
@@ -3174,7 +3166,7 @@ const DeclGen = struct {
return try self.convertToDirect(result_ty, result_id);
}
- fn extractVectorComponent(self: *DeclGen, result_ty: Type, vector_id: IdRef, field: u32) !IdRef {
+ fn extractVectorComponent(self: *NavGen, result_ty: Type, vector_id: IdRef, field: u32) !IdRef {
// Whether this is an OpTypeVector or OpTypeArray, we need to emit the same instruction regardless.
const result_ty_id = try self.resolveType(result_ty, .direct);
const result_id = self.spv.allocId();
@@ -3193,7 +3185,7 @@ const DeclGen = struct {
is_volatile: bool = false,
};
- fn load(self: *DeclGen, value_ty: Type, ptr_id: IdRef, options: MemoryOptions) !IdRef {
+ fn load(self: *NavGen, value_ty: Type, ptr_id: IdRef, options: MemoryOptions) !IdRef {
const indirect_value_ty_id = try self.resolveType(value_ty, .indirect);
const result_id = self.spv.allocId();
const access = spec.MemoryAccess.Extended{
@@ -3208,7 +3200,7 @@ const DeclGen = struct {
return try self.convertToDirect(value_ty, result_id);
}
- fn store(self: *DeclGen, value_ty: Type, ptr_id: IdRef, value_id: IdRef, options: MemoryOptions) !void {
+ fn store(self: *NavGen, value_ty: Type, ptr_id: IdRef, value_id: IdRef, options: MemoryOptions) !void {
const indirect_value_id = try self.convertToIndirect(value_ty, value_id);
const access = spec.MemoryAccess.Extended{
.Volatile = options.is_volatile,
@@ -3220,13 +3212,13 @@ const DeclGen = struct {
});
}
- fn genBody(self: *DeclGen, body: []const Air.Inst.Index) Error!void {
+ fn genBody(self: *NavGen, body: []const Air.Inst.Index) Error!void {
for (body) |inst| {
try self.genInst(inst);
}
}
- fn genInst(self: *DeclGen, inst: Air.Inst.Index) !void {
+ fn genInst(self: *NavGen, inst: Air.Inst.Index) !void {
const mod = self.pt.zcu;
const ip = &mod.intern_pool;
if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip))
@@ -3397,7 +3389,7 @@ const DeclGen = struct {
try self.inst_results.putNoClobber(self.gpa, inst, result_id);
}
- fn airBinOpSimple(self: *DeclGen, inst: Air.Inst.Index, op: BinaryOp) !?IdRef {
+ fn airBinOpSimple(self: *NavGen, inst: Air.Inst.Index, op: BinaryOp) !?IdRef {
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs = try self.temporary(bin_op.lhs);
const rhs = try self.temporary(bin_op.rhs);
@@ -3406,7 +3398,7 @@ const DeclGen = struct {
return try result.materialize(self);
}
- fn airShift(self: *DeclGen, inst: Air.Inst.Index, unsigned: BinaryOp, signed: BinaryOp) !?IdRef {
+ fn airShift(self: *NavGen, inst: Air.Inst.Index, unsigned: BinaryOp, signed: BinaryOp) !?IdRef {
const mod = self.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
@@ -3441,7 +3433,7 @@ const DeclGen = struct {
const MinMax = enum { min, max };
- fn airMinMax(self: *DeclGen, inst: Air.Inst.Index, op: MinMax) !?IdRef {
+ fn airMinMax(self: *NavGen, inst: Air.Inst.Index, op: MinMax) !?IdRef {
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs = try self.temporary(bin_op.lhs);
@@ -3451,7 +3443,7 @@ const DeclGen = struct {
return try result.materialize(self);
}
- fn minMax(self: *DeclGen, lhs: Temporary, rhs: Temporary, op: MinMax) !Temporary {
+ fn minMax(self: *NavGen, lhs: Temporary, rhs: Temporary, op: MinMax) !Temporary {
const info = self.arithmeticTypeInfo(lhs.ty);
const binop: BinaryOp = switch (info.class) {
@@ -3484,7 +3476,7 @@ const DeclGen = struct {
/// - Signed integers are also sign extended if they are negative.
/// All other values are returned unmodified (this makes strange integer
/// wrapping easier to use in generic operations).
- fn normalize(self: *DeclGen, value: Temporary, info: ArithmeticTypeInfo) !Temporary {
+ fn normalize(self: *NavGen, value: Temporary, info: ArithmeticTypeInfo) !Temporary {
const mod = self.pt.zcu;
const ty = value.ty;
switch (info.class) {
@@ -3507,7 +3499,7 @@ const DeclGen = struct {
}
}
- fn airDivFloor(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ fn airDivFloor(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs = try self.temporary(bin_op.lhs);
@@ -3564,7 +3556,7 @@ const DeclGen = struct {
}
}
- fn airDivTrunc(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ fn airDivTrunc(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs = try self.temporary(bin_op.lhs);
@@ -3592,7 +3584,7 @@ const DeclGen = struct {
}
}
- fn airUnOpSimple(self: *DeclGen, inst: Air.Inst.Index, op: UnaryOp) !?IdRef {
+ fn airUnOpSimple(self: *NavGen, inst: Air.Inst.Index, op: UnaryOp) !?IdRef {
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const operand = try self.temporary(un_op);
const result = try self.buildUnary(op, operand);
@@ -3600,7 +3592,7 @@ const DeclGen = struct {
}
fn airArithOp(
- self: *DeclGen,
+ self: *NavGen,
inst: Air.Inst.Index,
comptime fop: BinaryOp,
comptime sop: BinaryOp,
@@ -3626,7 +3618,7 @@ const DeclGen = struct {
return try result.materialize(self);
}
- fn airAbs(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ fn airAbs(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try self.temporary(ty_op.operand);
// Note: operand_ty may be signed, while ty is always unsigned!
@@ -3635,7 +3627,7 @@ const DeclGen = struct {
return try result.materialize(self);
}
- fn abs(self: *DeclGen, result_ty: Type, value: Temporary) !Temporary {
+ fn abs(self: *NavGen, result_ty: Type, value: Temporary) !Temporary {
const target = self.getTarget();
const operand_info = self.arithmeticTypeInfo(value.ty);
@@ -3658,7 +3650,7 @@ const DeclGen = struct {
}
fn airAddSubOverflow(
- self: *DeclGen,
+ self: *NavGen,
inst: Air.Inst.Index,
comptime add: BinaryOp,
comptime ucmp: CmpPredicate,
@@ -3724,7 +3716,7 @@ const DeclGen = struct {
);
}
- fn airMulOverflow(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ fn airMulOverflow(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
const target = self.getTarget();
const pt = self.pt;
@@ -3904,7 +3896,7 @@ const DeclGen = struct {
);
}
- fn airShlOverflow(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ fn airShlOverflow(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
const mod = self.pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
@@ -3944,7 +3936,7 @@ const DeclGen = struct {
);
}
- fn airMulAdd(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ fn airMulAdd(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = self.air.extraData(Air.Bin, pl_op.payload).data;
@@ -3960,7 +3952,7 @@ const DeclGen = struct {
return try result.materialize(self);
}
- fn airClzCtz(self: *DeclGen, inst: Air.Inst.Index, op: UnaryOp) !?IdRef {
+ fn airClzCtz(self: *NavGen, inst: Air.Inst.Index, op: UnaryOp) !?IdRef {
if (self.liveness.isUnused(inst)) return null;
const mod = self.pt.zcu;
@@ -3991,7 +3983,7 @@ const DeclGen = struct {
return try result.materialize(self);
}
- fn airSelect(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ fn airSelect(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = self.air.extraData(Air.Bin, pl_op.payload).data;
const pred = try self.temporary(pl_op.operand);
@@ -4002,7 +3994,7 @@ const DeclGen = struct {
return try result.materialize(self);
}
- fn airSplat(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ fn airSplat(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand_id = try self.resolve(ty_op.operand);
@@ -4011,7 +4003,7 @@ const DeclGen = struct {
return try self.constructVectorSplat(result_ty, operand_id);
}
- fn airReduce(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ fn airReduce(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
const mod = self.pt.zcu;
const reduce = self.air.instructions.items(.data)[@intFromEnum(inst)].reduce;
const operand = try self.resolve(reduce.operand);
@@ -4086,7 +4078,7 @@ const DeclGen = struct {
return result_id;
}
- fn airShuffle(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ fn airShuffle(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
const pt = self.pt;
const mod = pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
@@ -4163,7 +4155,7 @@ const DeclGen = struct {
return try self.constructVector(result_ty, components);
}
- fn indicesToIds(self: *DeclGen, indices: []const u32) ![]IdRef {
+ fn indicesToIds(self: *NavGen, indices: []const u32) ![]IdRef {
const ids = try self.gpa.alloc(IdRef, indices.len);
errdefer self.gpa.free(ids);
for (indices, ids) |index, *id| {
@@ -4174,7 +4166,7 @@ const DeclGen = struct {
}
fn accessChainId(
- self: *DeclGen,
+ self: *NavGen,
result_ty_id: IdRef,
base: IdRef,
indices: []const IdRef,
@@ -4194,7 +4186,7 @@ const DeclGen = struct {
/// same as that of the base pointer, or that of a dereferenced base pointer. AccessChain
/// is the latter and PtrAccessChain is the former.
fn accessChain(
- self: *DeclGen,
+ self: *NavGen,
result_ty_id: IdRef,
base: IdRef,
indices: []const u32,
@@ -4205,7 +4197,7 @@ const DeclGen = struct {
}
fn ptrAccessChain(
- self: *DeclGen,
+ self: *NavGen,
result_ty_id: IdRef,
base: IdRef,
element: IdRef,
@@ -4225,7 +4217,7 @@ const DeclGen = struct {
return result_id;
}
- fn ptrAdd(self: *DeclGen, result_ty: Type, ptr_ty: Type, ptr_id: IdRef, offset_id: IdRef) !IdRef {
+ fn ptrAdd(self: *NavGen, result_ty: Type, ptr_ty: Type, ptr_id: IdRef, offset_id: IdRef) !IdRef {
const mod = self.pt.zcu;
const result_ty_id = try self.resolveType(result_ty, .direct);
@@ -4246,7 +4238,7 @@ const DeclGen = struct {
}
}
- fn airPtrAdd(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ fn airPtrAdd(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const ptr_id = try self.resolve(bin_op.lhs);
@@ -4257,7 +4249,7 @@ const DeclGen = struct {
return try self.ptrAdd(result_ty, ptr_ty, ptr_id, offset_id);
}
- fn airPtrSub(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ fn airPtrSub(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const ptr_id = try self.resolve(bin_op.lhs);
@@ -4277,7 +4269,7 @@ const DeclGen = struct {
}
fn cmp(
- self: *DeclGen,
+ self: *NavGen,
op: std.math.CompareOperator,
lhs: Temporary,
rhs: Temporary,
@@ -4443,7 +4435,7 @@ const DeclGen = struct {
}
fn airCmp(
- self: *DeclGen,
+ self: *NavGen,
inst: Air.Inst.Index,
comptime op: std.math.CompareOperator,
) !?IdRef {
@@ -4455,7 +4447,7 @@ const DeclGen = struct {
return try result.materialize(self);
}
- fn airVectorCmp(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ fn airVectorCmp(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const vec_cmp = self.air.extraData(Air.VectorCmp, ty_pl.payload).data;
const lhs = try self.temporary(vec_cmp.lhs);
@@ -4468,7 +4460,7 @@ const DeclGen = struct {
/// Bitcast one type to another. Note: both types, input, output are expected in **direct** representation.
fn bitCast(
- self: *DeclGen,
+ self: *NavGen,
dst_ty: Type,
src_ty: Type,
src_id: IdRef,
@@ -4536,7 +4528,7 @@ const DeclGen = struct {
return result_id;
}
- fn airBitCast(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ fn airBitCast(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand_id = try self.resolve(ty_op.operand);
const operand_ty = self.typeOf(ty_op.operand);
@@ -4544,7 +4536,7 @@ const DeclGen = struct {
return try self.bitCast(result_ty, operand_ty, operand_id);
}
- fn airIntCast(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ fn airIntCast(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const src = try self.temporary(ty_op.operand);
const dst_ty = self.typeOfIndex(inst);
@@ -4570,7 +4562,7 @@ const DeclGen = struct {
return try result.materialize(self);
}
- fn intFromPtr(self: *DeclGen, operand_id: IdRef) !IdRef {
+ fn intFromPtr(self: *NavGen, operand_id: IdRef) !IdRef {
const result_type_id = try self.resolveType(Type.usize, .direct);
const result_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpConvertPtrToU, .{
@@ -4581,13 +4573,13 @@ const DeclGen = struct {
return result_id;
}
- fn airIntFromPtr(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ fn airIntFromPtr(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const operand_id = try self.resolve(un_op);
return try self.intFromPtr(operand_id);
}
- fn airFloatFromInt(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ fn airFloatFromInt(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand_ty = self.typeOf(ty_op.operand);
const operand_id = try self.resolve(ty_op.operand);
@@ -4595,7 +4587,7 @@ const DeclGen = struct {
return try self.floatFromInt(result_ty, operand_ty, operand_id);
}
- fn floatFromInt(self: *DeclGen, result_ty: Type, operand_ty: Type, operand_id: IdRef) !IdRef {
+ fn floatFromInt(self: *NavGen, result_ty: Type, operand_ty: Type, operand_id: IdRef) !IdRef {
const operand_info = self.arithmeticTypeInfo(operand_ty);
const result_id = self.spv.allocId();
const result_ty_id = try self.resolveType(result_ty, .direct);
@@ -4614,14 +4606,14 @@ const DeclGen = struct {
return result_id;
}
- fn airIntFromFloat(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ fn airIntFromFloat(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand_id = try self.resolve(ty_op.operand);
const result_ty = self.typeOfIndex(inst);
return try self.intFromFloat(result_ty, operand_id);
}
- fn intFromFloat(self: *DeclGen, result_ty: Type, operand_id: IdRef) !IdRef {
+ fn intFromFloat(self: *NavGen, result_ty: Type, operand_id: IdRef) !IdRef {
const result_info = self.arithmeticTypeInfo(result_ty);
const result_ty_id = try self.resolveType(result_ty, .direct);
const result_id = self.spv.allocId();
@@ -4640,14 +4632,14 @@ const DeclGen = struct {
return result_id;
}
- fn airIntFromBool(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ fn airIntFromBool(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const operand = try self.temporary(un_op);
const result = try self.intFromBool(operand);
return try result.materialize(self);
}
- fn airFloatCast(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ fn airFloatCast(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand_id = try self.resolve(ty_op.operand);
const dest_ty = self.typeOfIndex(inst);
@@ -4662,7 +4654,7 @@ const DeclGen = struct {
return result_id;
}
- fn airNot(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ fn airNot(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try self.temporary(ty_op.operand);
const result_ty = self.typeOfIndex(inst);
@@ -4681,7 +4673,7 @@ const DeclGen = struct {
return try result.materialize(self);
}
- fn airArrayToSlice(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ fn airArrayToSlice(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
const pt = self.pt;
const mod = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
@@ -4709,7 +4701,7 @@ const DeclGen = struct {
);
}
- fn airSlice(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ fn airSlice(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const ptr_id = try self.resolve(bin_op.lhs);
@@ -4726,7 +4718,7 @@ const DeclGen = struct {
);
}
- fn airAggregateInit(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ fn airAggregateInit(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
const pt = self.pt;
const mod = pt.zcu;
const ip = &mod.intern_pool;
@@ -4816,7 +4808,7 @@ const DeclGen = struct {
}
}
- fn sliceOrArrayLen(self: *DeclGen, operand_id: IdRef, ty: Type) !IdRef {
+ fn sliceOrArrayLen(self: *NavGen, operand_id: IdRef, ty: Type) !IdRef {
const pt = self.pt;
const mod = pt.zcu;
switch (ty.ptrSize(mod)) {
@@ -4832,7 +4824,7 @@ const DeclGen = struct {
}
}
- fn sliceOrArrayPtr(self: *DeclGen, operand_id: IdRef, ty: Type) !IdRef {
+ fn sliceOrArrayPtr(self: *NavGen, operand_id: IdRef, ty: Type) !IdRef {
const mod = self.pt.zcu;
if (ty.isSlice(mod)) {
const ptr_ty = ty.slicePtrFieldType(mod);
@@ -4841,7 +4833,7 @@ const DeclGen = struct {
return operand_id;
}
- fn airMemcpy(self: *DeclGen, inst: Air.Inst.Index) !void {
+ fn airMemcpy(self: *NavGen, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const dest_slice = try self.resolve(bin_op.lhs);
const src_slice = try self.resolve(bin_op.rhs);
@@ -4857,14 +4849,14 @@ const DeclGen = struct {
});
}
- fn airSliceField(self: *DeclGen, inst: Air.Inst.Index, field: u32) !?IdRef {
+ fn airSliceField(self: *NavGen, inst: Air.Inst.Index, field: u32) !?IdRef {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const field_ty = self.typeOfIndex(inst);
const operand_id = try self.resolve(ty_op.operand);
return try self.extractField(field_ty, operand_id, field);
}
- fn airSliceElemPtr(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ fn airSliceElemPtr(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
const mod = self.pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
@@ -4881,7 +4873,7 @@ const DeclGen = struct {
return try self.ptrAccessChain(ptr_ty_id, slice_ptr, index_id, &.{});
}
- fn airSliceElemVal(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ fn airSliceElemVal(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
const mod = self.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const slice_ty = self.typeOf(bin_op.lhs);
@@ -4898,7 +4890,7 @@ const DeclGen = struct {
return try self.load(slice_ty.childType(mod), elem_ptr, .{ .is_volatile = slice_ty.isVolatilePtr(mod) });
}
- fn ptrElemPtr(self: *DeclGen, ptr_ty: Type, ptr_id: IdRef, index_id: IdRef) !IdRef {
+ fn ptrElemPtr(self: *NavGen, ptr_ty: Type, ptr_id: IdRef, index_id: IdRef) !IdRef {
const mod = self.pt.zcu;
// Construct new pointer type for the resulting pointer
const elem_ty = ptr_ty.elemType2(mod); // use elemType() so that we get T for *[N]T.
@@ -4913,7 +4905,7 @@ const DeclGen = struct {
}
}
- fn airPtrElemPtr(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ fn airPtrElemPtr(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
const pt = self.pt;
const mod = pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
@@ -4931,7 +4923,7 @@ const DeclGen = struct {
return try self.ptrElemPtr(src_ptr_ty, ptr_id, index_id);
}
- fn airArrayElemVal(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ fn airArrayElemVal(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
const mod = self.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const array_ty = self.typeOf(bin_op.lhs);
@@ -4992,7 +4984,7 @@ const DeclGen = struct {
return try self.convertToDirect(elem_ty, result_id);
}
- fn airPtrElemVal(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ fn airPtrElemVal(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
const mod = self.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const ptr_ty = self.typeOf(bin_op.lhs);
@@ -5003,7 +4995,7 @@ const DeclGen = struct {
return try self.load(elem_ty, elem_ptr_id, .{ .is_volatile = ptr_ty.isVolatilePtr(mod) });
}
- fn airVectorStoreElem(self: *DeclGen, inst: Air.Inst.Index) !void {
+ fn airVectorStoreElem(self: *NavGen, inst: Air.Inst.Index) !void {
const mod = self.pt.zcu;
const data = self.air.instructions.items(.data)[@intFromEnum(inst)].vector_store_elem;
const extra = self.air.extraData(Air.Bin, data.payload).data;
@@ -5025,7 +5017,7 @@ const DeclGen = struct {
});
}
- fn airSetUnionTag(self: *DeclGen, inst: Air.Inst.Index) !void {
+ fn airSetUnionTag(self: *NavGen, inst: Air.Inst.Index) !void {
const mod = self.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const un_ptr_ty = self.typeOf(bin_op.lhs);
@@ -5048,7 +5040,7 @@ const DeclGen = struct {
}
}
- fn airGetUnionTag(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ fn airGetUnionTag(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const un_ty = self.typeOf(ty_op.operand);
@@ -5064,7 +5056,7 @@ const DeclGen = struct {
}
fn unionInit(
- self: *DeclGen,
+ self: *NavGen,
ty: Type,
active_field: u32,
payload: ?IdRef,
@@ -5129,7 +5121,7 @@ const DeclGen = struct {
return try self.load(ty, tmp_id, .{});
}
- fn airUnionInit(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ fn airUnionInit(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
const pt = self.pt;
const mod = pt.zcu;
const ip = &mod.intern_pool;
@@ -5146,7 +5138,7 @@ const DeclGen = struct {
return try self.unionInit(ty, extra.field_index, payload);
}
- fn airStructFieldVal(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ fn airStructFieldVal(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
const pt = self.pt;
const mod = pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
@@ -5191,7 +5183,7 @@ const DeclGen = struct {
}
}
- fn airFieldParentPtr(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ fn airFieldParentPtr(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
const pt = self.pt;
const mod = pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
@@ -5225,7 +5217,7 @@ const DeclGen = struct {
}
fn structFieldPtr(
- self: *DeclGen,
+ self: *NavGen,
result_ptr_ty: Type,
object_ptr_ty: Type,
object_ptr: IdRef,
@@ -5273,7 +5265,7 @@ const DeclGen = struct {
}
}
- fn airStructFieldPtrIndex(self: *DeclGen, inst: Air.Inst.Index, field_index: u32) !?IdRef {
+ fn airStructFieldPtrIndex(self: *NavGen, inst: Air.Inst.Index, field_index: u32) !?IdRef {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const struct_ptr = try self.resolve(ty_op.operand);
const struct_ptr_ty = self.typeOf(ty_op.operand);
@@ -5294,7 +5286,7 @@ const DeclGen = struct {
// which is in the Generic address space. The variable is actually
// placed in the Function address space.
fn alloc(
- self: *DeclGen,
+ self: *NavGen,
ty: Type,
options: AllocOptions,
) !IdRef {
@@ -5326,7 +5318,7 @@ const DeclGen = struct {
}
}
- fn airAlloc(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ fn airAlloc(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
const mod = self.pt.zcu;
const ptr_ty = self.typeOfIndex(inst);
assert(ptr_ty.ptrAddressSpace(mod) == .generic);
@@ -5334,7 +5326,7 @@ const DeclGen = struct {
return try self.alloc(child_ty, .{});
}
- fn airArg(self: *DeclGen) IdRef {
+ fn airArg(self: *NavGen) IdRef {
defer self.next_arg_index += 1;
return self.args.items[self.next_arg_index];
}
@@ -5343,7 +5335,7 @@ const DeclGen = struct {
/// block to jump to. This function emits instructions, so it should be emitted
/// inside the merge block of the block.
/// This function should only be called with structured control flow generation.
- fn structuredNextBlock(self: *DeclGen, incoming: []const ControlFlow.Structured.Block.Incoming) !IdRef {
+ fn structuredNextBlock(self: *NavGen, incoming: []const ControlFlow.Structured.Block.Incoming) !IdRef {
assert(self.control_flow == .structured);
const result_id = self.spv.allocId();
@@ -5362,7 +5354,7 @@ const DeclGen = struct {
/// Jumps to the block with the target block-id. This function must only be called when
/// terminating a body, there should be no instructions after it.
/// This function should only be called with structured control flow generation.
- fn structuredBreak(self: *DeclGen, target_block: IdRef) !void {
+ fn structuredBreak(self: *NavGen, target_block: IdRef) !void {
assert(self.control_flow == .structured);
const sblock = self.control_flow.structured.block_stack.getLast();
@@ -5393,7 +5385,7 @@ const DeclGen = struct {
/// should still be emitted to the block that should follow this structured body.
/// This function should only be called with structured control flow generation.
fn genStructuredBody(
- self: *DeclGen,
+ self: *NavGen,
/// This parameter defines the method that this structured body is exited with.
block_merge_type: union(enum) {
/// Using selection; early exits from this body are surrounded with
@@ -5487,13 +5479,13 @@ const DeclGen = struct {
}
}
- fn airBlock(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ fn airBlock(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
const inst_datas = self.air.instructions.items(.data);
const extra = self.air.extraData(Air.Block, inst_datas[@intFromEnum(inst)].ty_pl.payload);
return self.lowerBlock(inst, @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len]));
}
- fn lowerBlock(self: *DeclGen, inst: Air.Inst.Index, body: []const Air.Inst.Index) !?IdRef {
+ fn lowerBlock(self: *NavGen, inst: Air.Inst.Index, body: []const Air.Inst.Index) !?IdRef {
// In AIR, a block doesn't really define an entry point like a block, but
// more like a scope that breaks can jump out of and "return" a value from.
// This cannot be directly modelled in SPIR-V, so in a block instruction,
@@ -5633,7 +5625,7 @@ const DeclGen = struct {
return null;
}
- fn airBr(self: *DeclGen, inst: Air.Inst.Index) !void {
+ fn airBr(self: *NavGen, inst: Air.Inst.Index) !void {
const pt = self.pt;
const br = self.air.instructions.items(.data)[@intFromEnum(inst)].br;
const operand_ty = self.typeOf(br.operand);
@@ -5670,7 +5662,7 @@ const DeclGen = struct {
}
}
- fn airCondBr(self: *DeclGen, inst: Air.Inst.Index) !void {
+ fn airCondBr(self: *NavGen, inst: Air.Inst.Index) !void {
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const cond_br = self.air.extraData(Air.CondBr, pl_op.payload);
const then_body: []const Air.Inst.Index = @ptrCast(self.air.extra[cond_br.end..][0..cond_br.data.then_body_len]);
@@ -5730,7 +5722,7 @@ const DeclGen = struct {
}
}
- fn airLoop(self: *DeclGen, inst: Air.Inst.Index) !void {
+ fn airLoop(self: *NavGen, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const loop = self.air.extraData(Air.Block, ty_pl.payload);
const body: []const Air.Inst.Index = @ptrCast(self.air.extra[loop.end..][0..loop.data.body_len]);
@@ -5777,7 +5769,7 @@ const DeclGen = struct {
}
}
- fn airLoad(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ fn airLoad(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
const mod = self.pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const ptr_ty = self.typeOf(ty_op.operand);
@@ -5788,7 +5780,7 @@ const DeclGen = struct {
return try self.load(elem_ty, operand, .{ .is_volatile = ptr_ty.isVolatilePtr(mod) });
}
- fn airStore(self: *DeclGen, inst: Air.Inst.Index) !void {
+ fn airStore(self: *NavGen, inst: Air.Inst.Index) !void {
const mod = self.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const ptr_ty = self.typeOf(bin_op.lhs);
@@ -5799,14 +5791,13 @@ const DeclGen = struct {
try self.store(elem_ty, ptr, value, .{ .is_volatile = ptr_ty.isVolatilePtr(mod) });
}
- fn airRet(self: *DeclGen, inst: Air.Inst.Index) !void {
+ fn airRet(self: *NavGen, inst: Air.Inst.Index) !void {
const pt = self.pt;
const mod = pt.zcu;
const operand = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const ret_ty = self.typeOf(operand);
if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt)) {
- const decl = mod.declPtr(self.decl_index);
- const fn_info = mod.typeToFunc(decl.typeOf(mod)).?;
+ const fn_info = mod.typeToFunc(mod.navValue(self.owner_nav).typeOf(mod)).?;
if (Type.fromInterned(fn_info.return_type).isError(mod)) {
// Functions with an empty error set are emitted with an error code
// return type and return zero so they can be function pointers coerced
@@ -5822,7 +5813,7 @@ const DeclGen = struct {
try self.func.body.emit(self.spv.gpa, .OpReturnValue, .{ .value = operand_id });
}
- fn airRetLoad(self: *DeclGen, inst: Air.Inst.Index) !void {
+ fn airRetLoad(self: *NavGen, inst: Air.Inst.Index) !void {
const pt = self.pt;
const mod = pt.zcu;
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
@@ -5830,8 +5821,7 @@ const DeclGen = struct {
const ret_ty = ptr_ty.childType(mod);
if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt)) {
- const decl = mod.declPtr(self.decl_index);
- const fn_info = mod.typeToFunc(decl.typeOf(mod)).?;
+ const fn_info = mod.typeToFunc(mod.navValue(self.owner_nav).typeOf(mod)).?;
if (Type.fromInterned(fn_info.return_type).isError(mod)) {
// Functions with an empty error set are emitted with an error code
// return type and return zero so they can be function pointers coerced
@@ -5850,7 +5840,7 @@ const DeclGen = struct {
});
}
- fn airTry(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ fn airTry(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
const mod = self.pt.zcu;
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const err_union_id = try self.resolve(pl_op.operand);
@@ -5920,7 +5910,7 @@ const DeclGen = struct {
return try self.extractField(payload_ty, err_union_id, eu_layout.payloadFieldIndex());
}
- fn airErrUnionErr(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ fn airErrUnionErr(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
const mod = self.pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand_id = try self.resolve(ty_op.operand);
@@ -5943,7 +5933,7 @@ const DeclGen = struct {
return try self.extractField(Type.anyerror, operand_id, eu_layout.errorFieldIndex());
}
- fn airErrUnionPayload(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ fn airErrUnionPayload(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand_id = try self.resolve(ty_op.operand);
const payload_ty = self.typeOfIndex(inst);
@@ -5956,7 +5946,7 @@ const DeclGen = struct {
return try self.extractField(payload_ty, operand_id, eu_layout.payloadFieldIndex());
}
- fn airWrapErrUnionErr(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ fn airWrapErrUnionErr(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
const mod = self.pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const err_union_ty = self.typeOfIndex(inst);
@@ -5981,7 +5971,7 @@ const DeclGen = struct {
return try self.constructStruct(err_union_ty, &types, &members);
}
- fn airWrapErrUnionPayload(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ fn airWrapErrUnionPayload(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const err_union_ty = self.typeOfIndex(inst);
const operand_id = try self.resolve(ty_op.operand);
@@ -6003,7 +5993,7 @@ const DeclGen = struct {
return try self.constructStruct(err_union_ty, &types, &members);
}
- fn airIsNull(self: *DeclGen, inst: Air.Inst.Index, is_pointer: bool, pred: enum { is_null, is_non_null }) !?IdRef {
+ fn airIsNull(self: *NavGen, inst: Air.Inst.Index, is_pointer: bool, pred: enum { is_null, is_non_null }) !?IdRef {
const pt = self.pt;
const mod = pt.zcu;
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
@@ -6080,7 +6070,7 @@ const DeclGen = struct {
};
}
- fn airIsErr(self: *DeclGen, inst: Air.Inst.Index, pred: enum { is_err, is_non_err }) !?IdRef {
+ fn airIsErr(self: *NavGen, inst: Air.Inst.Index, pred: enum { is_err, is_non_err }) !?IdRef {
const mod = self.pt.zcu;
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const operand_id = try self.resolve(un_op);
@@ -6113,7 +6103,7 @@ const DeclGen = struct {
return result_id;
}
- fn airUnwrapOptional(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ fn airUnwrapOptional(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
const pt = self.pt;
const mod = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
@@ -6130,7 +6120,7 @@ const DeclGen = struct {
return try self.extractField(payload_ty, operand_id, 0);
}
- fn airUnwrapOptionalPtr(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ fn airUnwrapOptionalPtr(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
const pt = self.pt;
const mod = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
@@ -6155,7 +6145,7 @@ const DeclGen = struct {
return try self.accessChain(result_ty_id, operand_id, &.{0});
}
- fn airWrapOptional(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ fn airWrapOptional(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
const pt = self.pt;
const mod = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
@@ -6178,7 +6168,7 @@ const DeclGen = struct {
return try self.constructStruct(optional_ty, &types, &members);
}
- fn airSwitchBr(self: *DeclGen, inst: Air.Inst.Index) !void {
+ fn airSwitchBr(self: *NavGen, inst: Air.Inst.Index) !void {
const pt = self.pt;
const mod = pt.zcu;
const target = self.getTarget();
@@ -6347,16 +6337,15 @@ const DeclGen = struct {
}
}
- fn airUnreach(self: *DeclGen) !void {
+ fn airUnreach(self: *NavGen) !void {
try self.func.body.emit(self.spv.gpa, .OpUnreachable, {});
}
- fn airDbgStmt(self: *DeclGen, inst: Air.Inst.Index) !void {
+ fn airDbgStmt(self: *NavGen, inst: Air.Inst.Index) !void {
const pt = self.pt;
const mod = pt.zcu;
const dbg_stmt = self.air.instructions.items(.data)[@intFromEnum(inst)].dbg_stmt;
- const decl = mod.declPtr(self.decl_index);
- const path = decl.getFileScope(mod).sub_file_path;
+ const path = mod.navFileScope(self.owner_nav).sub_file_path;
try self.func.body.emit(self.spv.gpa, .OpLine, .{
.file = try self.spv.resolveString(path),
.line = self.base_line + dbg_stmt.line + 1,
@@ -6364,25 +6353,24 @@ const DeclGen = struct {
});
}
- fn airDbgInlineBlock(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ fn airDbgInlineBlock(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
const mod = self.pt.zcu;
const inst_datas = self.air.instructions.items(.data);
const extra = self.air.extraData(Air.DbgInlineBlock, inst_datas[@intFromEnum(inst)].ty_pl.payload);
- const decl = mod.funcOwnerDeclPtr(extra.data.func);
const old_base_line = self.base_line;
defer self.base_line = old_base_line;
- self.base_line = decl.navSrcLine(mod);
+ self.base_line = mod.navSrcLine(mod.funcInfo(extra.data.func).owner_nav);
return self.lowerBlock(inst, @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len]));
}
- fn airDbgVar(self: *DeclGen, inst: Air.Inst.Index) !void {
+ fn airDbgVar(self: *NavGen, inst: Air.Inst.Index) !void {
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const target_id = try self.resolve(pl_op.operand);
const name = self.air.nullTerminatedString(pl_op.payload);
try self.spv.debugName(target_id, name);
}
- fn airAssembly(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ fn airAssembly(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
const mod = self.pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Asm, ty_pl.payload);
@@ -6465,7 +6453,7 @@ const DeclGen = struct {
// TODO: Translate proper error locations.
assert(as.errors.items.len != 0);
assert(self.error_msg == null);
- const src_loc = mod.declPtr(self.decl_index).navSrcLoc(mod);
+ const src_loc = mod.navSrcLoc(self.owner_nav);
self.error_msg = try Zcu.ErrorMsg.create(mod.gpa, src_loc, "failed to assemble SPIR-V inline assembly", .{});
const notes = try mod.gpa.alloc(Zcu.ErrorMsg, as.errors.items.len);
@@ -6511,7 +6499,7 @@ const DeclGen = struct {
return null;
}
- fn airCall(self: *DeclGen, inst: Air.Inst.Index, modifier: std.builtin.CallModifier) !?IdRef {
+ fn airCall(self: *NavGen, inst: Air.Inst.Index, modifier: std.builtin.CallModifier) !?IdRef {
_ = modifier;
const pt = self.pt;
@@ -6566,7 +6554,7 @@ const DeclGen = struct {
return result_id;
}
- fn builtin3D(self: *DeclGen, result_ty: Type, builtin: spec.BuiltIn, dimension: u32, out_of_range_value: anytype) !IdRef {
+ fn builtin3D(self: *NavGen, result_ty: Type, builtin: spec.BuiltIn, dimension: u32, out_of_range_value: anytype) !IdRef {
if (dimension >= 3) {
return try self.constInt(result_ty, out_of_range_value, .direct);
}
@@ -6582,7 +6570,7 @@ const DeclGen = struct {
return try self.extractVectorComponent(result_ty, vec, dimension);
}
- fn airWorkItemId(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ fn airWorkItemId(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
if (self.liveness.isUnused(inst)) return null;
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const dimension = pl_op.payload;
@@ -6593,7 +6581,7 @@ const DeclGen = struct {
return try result.materialize(self);
}
- fn airWorkGroupSize(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ fn airWorkGroupSize(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
if (self.liveness.isUnused(inst)) return null;
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const dimension = pl_op.payload;
@@ -6604,7 +6592,7 @@ const DeclGen = struct {
return try result.materialize(self);
}
- fn airWorkGroupId(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ fn airWorkGroupId(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
if (self.liveness.isUnused(inst)) return null;
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const dimension = pl_op.payload;
@@ -6615,12 +6603,12 @@ const DeclGen = struct {
return try result.materialize(self);
}
- fn typeOf(self: *DeclGen, inst: Air.Inst.Ref) Type {
+ fn typeOf(self: *NavGen, inst: Air.Inst.Ref) Type {
const mod = self.pt.zcu;
return self.air.typeOf(inst, &mod.intern_pool);
}
- fn typeOfIndex(self: *DeclGen, inst: Air.Inst.Index) Type {
+ fn typeOfIndex(self: *NavGen, inst: Air.Inst.Index) Type {
const mod = self.pt.zcu;
return self.air.typeOfIndex(inst, &mod.intern_pool);
}