| author | mlugg <mlugg@mlugg.co.uk> | 2024-07-28 17:09:14 +0100 |
|---|---|---|
| committer | mlugg <mlugg@mlugg.co.uk> | 2024-08-11 07:29:41 +0100 |
| commit | 548a087fafeda5b07d2237d5137906b8d07da699 (patch) | |
| tree | 69135f129b84ab5b65f443d0a52899b232696e2b /src/codegen/llvm.zig | |
| parent | 531cd177e89c1edfcd2e52f74f220eb186a25f78 (diff) | |
compiler: split Decl into Nav and Cau
The type `Zcu.Decl` in the compiler is problematic: over time it has
gained many responsibilities. Every source declaration, container type,
generic instantiation, and `@extern` has a `Decl`. The roles these
`Decl`s play are in some cases entirely disjoint.
After careful analysis, I determined that the two main responsibilities
of `Decl` are as follows:
* A `Decl` acts as the "subject" of semantic analysis at comptime. A
single unit of analysis is either a runtime function body, or a
`Decl`. It registers incremental dependencies, tracks analysis errors,
etc.
* A `Decl` acts as a "global variable": a pointer to it is consistent,
and it may be lowered to a specific symbol by the codegen backend.
This commit eliminates `Decl` and introduces new types to model these
responsibilities: `Cau` (Comptime Analysis Unit) and `Nav` (Named
Addressable Value).
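As a rough sketch of how these two responsibilities map onto the new types
(the `Cau` fields and the exact shape of `Nav.status` below are illustrative
guesses, not the real `InternPool` definitions; only `name`, `fqn`, and the
`status.resolved` fields are taken from usages visible in the diff below):

```zig
// Illustrative only: a guess at the shape of the split, not the compiler's code.
const Cau = struct {
    /// The unit of comptime analysis: registers incremental dependencies,
    /// tracks analysis errors, etc. (fields hypothetical).
    owner: union(enum) { nav: Nav.Index, type: InternPool.Index },
};

const Nav = struct {
    /// Unqualified and fully-qualified names, used by codegen/link.
    name: NullTerminatedString,
    fqn: NullTerminatedString,
    /// Backends read the resolved value and its attributes from here,
    /// e.g. `nav.status.resolved.val`, `.alignment`, `.@"linksection"`,
    /// `.@"addrspace"` (as seen in the diff below).
    status: union(enum) {
        unresolved,
        resolved: struct {
            val: InternPool.Index,
            alignment: Alignment,
            @"linksection": OptionalNullTerminatedString,
            @"addrspace": AddressSpace,
        },
    },
};
```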
Every source declaration, and every container type requiring resolution
(so *not* including `opaque`), has a `Cau`. For a source declaration,
this `Cau` performs the resolution of its value. (When #131 is
implemented, it is an open question whether type and value resolution
will share a `Cau` or have two distinct `Cau`s.) For a type, this `Cau`
is the
context in which type resolution occurs.
Every non-`comptime` source declaration, every generic instantiation,
and every distinct `extern` has a `Nav`. These are sent to codegen/link:
the backends by definition do not care about `Cau`s.
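For backend code, the practical effect is a different lookup pattern. Below is
a minimal sketch (a fragment, not compilable on its own) of the Nav-based
accesses used throughout the LLVM backend changes in this diff; `pt` and
`nav_index` stand in for the backend's per-thread `Zcu.PerThread` handle and
the `InternPool.Nav.Index` it is handed:

```zig
const zcu = pt.zcu;
const ip = &zcu.intern_pool;

const nav = ip.getNav(nav_index);                  // was: zcu.declPtr(decl_index)
const val = nav.status.resolved.val;               // was: decl.val
const ty = Type.fromInterned(nav.typeOf(ip));      // was: decl.typeOf(zcu)
const owner_mod = zcu.navFileScope(nav_index).mod; // was: namespace.fileScope(zcu).mod

// `extern`/`variable` details now come from the value rather than the Decl:
const is_extern = ip.indexToKey(val) == .@"extern";
```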
This commit includes some minor, technically breaking changes surrounding
`usingnamespace`. I don't expect them to impact anyone, since they fix
semantics that were previously inconsistent (the behavior depended on
hashmap iteration order!).
Aside from that, this changeset has no significant user-facing changes.
Instead, it is an internal refactor which makes it easier to correctly
model the responsibilities of different objects, particularly regarding
incremental compilation. The performance impact should be negligible,
but I will take measurements before merging this work into `master`.
Co-authored-by: Jacob Young <jacobly0@users.noreply.github.com>
Co-authored-by: Jakub Konka <kubkon@jakubkonka.com>
Diffstat (limited to 'src/codegen/llvm.zig')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | src/codegen/llvm.zig | 848 |

1 file changed, 409 insertions(+), 439 deletions(-)
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 8b13b1f205..89a24152fc 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -776,7 +776,7 @@ pub const Object = struct { debug_enums: std.ArrayListUnmanaged(Builder.Metadata), debug_globals: std.ArrayListUnmanaged(Builder.Metadata), - debug_file_map: std.AutoHashMapUnmanaged(*const Zcu.File, Builder.Metadata), + debug_file_map: std.AutoHashMapUnmanaged(Zcu.File.Index, Builder.Metadata), debug_type_map: std.AutoHashMapUnmanaged(Type, Builder.Metadata), debug_unresolved_namespace_scopes: std.AutoArrayHashMapUnmanaged(InternPool.NamespaceIndex, Builder.Metadata), @@ -790,11 +790,13 @@ pub const Object = struct { /// version of the name and incorrectly get function not found in the llvm module. /// * it works for functions not all globals. /// Therefore, this table keeps track of the mapping. - decl_map: std.AutoHashMapUnmanaged(InternPool.DeclIndex, Builder.Global.Index), + nav_map: std.AutoHashMapUnmanaged(InternPool.Nav.Index, Builder.Global.Index), /// Same deal as `decl_map` but for anonymous declarations, which are always global constants. - anon_decl_map: std.AutoHashMapUnmanaged(InternPool.Index, Builder.Global.Index), - /// Serves the same purpose as `decl_map` but only used for the `is_named_enum_value` instruction. - named_enum_map: std.AutoHashMapUnmanaged(InternPool.DeclIndex, Builder.Function.Index), + uav_map: std.AutoHashMapUnmanaged(InternPool.Index, Builder.Global.Index), + /// Maps enum types to their corresponding LLVM functions for implementing the `tag_name` instruction. + enum_tag_name_map: std.AutoHashMapUnmanaged(InternPool.Index, Builder.Global.Index), + /// Serves the same purpose as `enum_tag_name_map` but for the `is_named_enum_value` instruction. + named_enum_map: std.AutoHashMapUnmanaged(InternPool.Index, Builder.Function.Index), /// Maps Zig types to LLVM types. The table memory is backed by the GPA of /// the compiler. 
/// TODO when InternPool garbage collection is implemented, this map needs @@ -963,8 +965,9 @@ pub const Object = struct { .debug_type_map = .{}, .debug_unresolved_namespace_scopes = .{}, .target = target, - .decl_map = .{}, - .anon_decl_map = .{}, + .nav_map = .{}, + .uav_map = .{}, + .enum_tag_name_map = .{}, .named_enum_map = .{}, .type_map = .{}, .error_name_table = .none, @@ -981,8 +984,9 @@ pub const Object = struct { self.debug_file_map.deinit(gpa); self.debug_type_map.deinit(gpa); self.debug_unresolved_namespace_scopes.deinit(gpa); - self.decl_map.deinit(gpa); - self.anon_decl_map.deinit(gpa); + self.nav_map.deinit(gpa); + self.uav_map.deinit(gpa); + self.enum_tag_name_map.deinit(gpa); self.named_enum_map.deinit(gpa); self.type_map.deinit(gpa); self.builder.deinit(); @@ -1108,7 +1112,7 @@ pub const Object = struct { const fwd_ref = self.debug_unresolved_namespace_scopes.values()[i]; const namespace = zcu.namespacePtr(namespace_index); - const debug_type = try self.lowerDebugType(namespace.getType(zcu)); + const debug_type = try self.lowerDebugType(Type.fromInterned(namespace.owner_type)); self.builder.debugForwardReferenceSetType(fwd_ref, debug_type); } @@ -1328,24 +1332,22 @@ pub const Object = struct { assert(std.meta.eql(pt, o.pt)); const zcu = pt.zcu; const comp = zcu.comp; + const ip = &zcu.intern_pool; const func = zcu.funcInfo(func_index); - const decl_index = func.owner_decl; - const decl = zcu.declPtr(decl_index); - const namespace = zcu.namespacePtr(decl.src_namespace); - const file_scope = namespace.fileScope(zcu); - const owner_mod = file_scope.mod; - const fn_info = zcu.typeToFunc(decl.typeOf(zcu)).?; + const nav = ip.getNav(func.owner_nav); + const file_scope = zcu.navFileScopeIndex(func.owner_nav); + const owner_mod = zcu.fileByIndex(file_scope).mod; + const fn_ty = Type.fromInterned(func.ty); + const fn_info = zcu.typeToFunc(fn_ty).?; const target = owner_mod.resolved_target.result; - const ip = &zcu.intern_pool; - var dg: DeclGen = .{ + var ng: NavGen = .{ .object = o, - .decl_index = decl_index, - .decl = decl, + .nav_index = func.owner_nav, .err_msg = null, }; - const function_index = try o.resolveLlvmFunction(decl_index); + const function_index = try o.resolveLlvmFunction(func.owner_nav); var attributes = try function_index.ptrConst(&o.builder).attributes.toWip(&o.builder); defer attributes.deinit(&o.builder); @@ -1409,7 +1411,7 @@ pub const Object = struct { } }, &o.builder); } - if (decl.@"linksection".toSlice(ip)) |section| + if (nav.status.resolved.@"linksection".toSlice(ip)) |section| function_index.setSection(try o.builder.string(section), &o.builder); var deinit_wip = true; @@ -1422,7 +1424,7 @@ pub const Object = struct { var llvm_arg_i: u32 = 0; - // This gets the LLVM values from the function and stores them in `dg.args`. + // This gets the LLVM values from the function and stores them in `ng.args`. 
const sret = firstParamSRet(fn_info, pt, target); const ret_ptr: Builder.Value = if (sret) param: { const param = wip.arg(llvm_arg_i); @@ -1622,13 +1624,13 @@ pub const Object = struct { const file, const subprogram = if (!wip.strip) debug_info: { const file = try o.getDebugFile(file_scope); - const line_number = decl.navSrcLine(zcu) + 1; - const is_internal_linkage = decl.val.getExternFunc(zcu) == null; - const debug_decl_type = try o.lowerDebugType(decl.typeOf(zcu)); + const line_number = zcu.navSrcLine(func.owner_nav) + 1; + const is_internal_linkage = ip.indexToKey(nav.status.resolved.val) != .@"extern"; + const debug_decl_type = try o.lowerDebugType(fn_ty); const subprogram = try o.builder.debugSubprogram( file, - try o.builder.metadataString(decl.name.toSlice(ip)), + try o.builder.metadataString(nav.name.toSlice(ip)), try o.builder.metadataStringFromStrtabString(function_index.name(&o.builder)), line_number, line_number + func.lbrace_line, @@ -1654,7 +1656,7 @@ pub const Object = struct { .gpa = gpa, .air = air, .liveness = liveness, - .dg = &dg, + .ng = &ng, .wip = wip, .is_naked = fn_info.cc == .Naked, .ret_ptr = ret_ptr, @@ -1665,7 +1667,7 @@ pub const Object = struct { .sync_scope = if (owner_mod.single_threaded) .singlethread else .system, .file = file, .scope = subprogram, - .base_line = dg.decl.navSrcLine(zcu), + .base_line = zcu.navSrcLine(func.owner_nav), .prev_dbg_line = 0, .prev_dbg_column = 0, .err_ret_trace = err_ret_trace, @@ -1675,9 +1677,8 @@ pub const Object = struct { fg.genBody(air.getMainBody()) catch |err| switch (err) { error.CodegenFail => { - decl.analysis = .codegen_failure; - try zcu.failed_analysis.put(zcu.gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index }), dg.err_msg.?); - dg.err_msg = null; + try zcu.failed_codegen.put(zcu.gpa, func.owner_nav, ng.err_msg.?); + ng.err_msg = null; return; }, else => |e| return e, @@ -1686,20 +1687,17 @@ pub const Object = struct { try fg.wip.finish(); } - pub fn updateDecl(self: *Object, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !void { + pub fn updateNav(self: *Object, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) !void { assert(std.meta.eql(pt, self.pt)); - const decl = pt.zcu.declPtr(decl_index); - var dg: DeclGen = .{ + var ng: NavGen = .{ .object = self, - .decl = decl, - .decl_index = decl_index, + .nav_index = nav_index, .err_msg = null, }; - dg.genDecl() catch |err| switch (err) { + ng.genDecl() catch |err| switch (err) { error.CodegenFail => { - decl.analysis = .codegen_failure; - try pt.zcu.failed_analysis.put(pt.zcu.gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index }), dg.err_msg.?); - dg.err_msg = null; + try pt.zcu.failed_codegen.put(pt.zcu.gpa, nav_index, ng.err_msg.?); + ng.err_msg = null; return; }, else => |e| return e, @@ -1714,19 +1712,18 @@ pub const Object = struct { ) link.File.UpdateExportsError!void { assert(std.meta.eql(pt, self.pt)); const zcu = pt.zcu; - const decl_index = switch (exported) { - .decl_index => |i| i, - .value => |val| return updateExportedValue(self, zcu, val, export_indices), + const nav_index = switch (exported) { + .nav => |nav| nav, + .uav => |uav| return updateExportedValue(self, zcu, uav, export_indices), }; const ip = &zcu.intern_pool; - const global_index = self.decl_map.get(decl_index).?; - const decl = zcu.declPtr(decl_index); + const global_index = self.nav_map.get(nav_index).?; const comp = zcu.comp; if (export_indices.len != 0) { return updateExportedGlobal(self, zcu, global_index, export_indices); } else { - const fqn = try 
self.builder.strtabString(decl.fqn.toSlice(ip)); + const fqn = try self.builder.strtabString(ip.getNav(nav_index).fqn.toSlice(ip)); try global_index.rename(fqn, &self.builder); global_index.setLinkage(.internal, &self.builder); if (comp.config.dll_export_fns) @@ -1745,7 +1742,7 @@ pub const Object = struct { const ip = &mod.intern_pool; const main_exp_name = try o.builder.strtabString(mod.all_exports.items[export_indices[0]].opts.name.toSlice(ip)); const global_index = i: { - const gop = try o.anon_decl_map.getOrPut(gpa, exported_value); + const gop = try o.uav_map.getOrPut(gpa, exported_value); if (gop.found_existing) { const global_index = gop.value_ptr.*; try global_index.rename(main_exp_name, &o.builder); @@ -1868,11 +1865,12 @@ pub const Object = struct { global.delete(&self.builder); } - fn getDebugFile(o: *Object, file: *const Zcu.File) Allocator.Error!Builder.Metadata { + fn getDebugFile(o: *Object, file_index: Zcu.File.Index) Allocator.Error!Builder.Metadata { const gpa = o.gpa; - const gop = try o.debug_file_map.getOrPut(gpa, file); - errdefer assert(o.debug_file_map.remove(file)); + const gop = try o.debug_file_map.getOrPut(gpa, file_index); + errdefer assert(o.debug_file_map.remove(file_index)); if (gop.found_existing) return gop.value_ptr.*; + const file = o.pt.zcu.fileByIndex(file_index); gop.value_ptr.* = try o.builder.debugFile( try o.builder.metadataString(std.fs.path.basename(file.sub_file_path)), dir_path: { @@ -1930,17 +1928,13 @@ pub const Object = struct { return debug_int_type; }, .Enum => { - const owner_decl_index = ty.getOwnerDecl(zcu); - const owner_decl = zcu.declPtr(owner_decl_index); - if (!ty.hasRuntimeBitsIgnoreComptime(pt)) { - const debug_enum_type = try o.makeEmptyNamespaceDebugType(owner_decl_index); + const debug_enum_type = try o.makeEmptyNamespaceDebugType(ty); try o.debug_type_map.put(gpa, ty, debug_enum_type); return debug_enum_type; } const enum_type = ip.loadEnumType(ty.toIntern()); - const enumerators = try gpa.alloc(Builder.Metadata, enum_type.names.len); defer gpa.free(enumerators); @@ -1963,9 +1957,11 @@ pub const Object = struct { ); } - const file_scope = zcu.namespacePtr(owner_decl.src_namespace).fileScope(zcu); - const file = try o.getDebugFile(file_scope); - const scope = try o.namespaceToDebugScope(owner_decl.src_namespace); + const file = try o.getDebugFile(ty.typeDeclInstAllowGeneratedTag(zcu).?.resolveFull(ip).file); + const scope = if (ty.getParentNamespace(zcu).?.unwrap()) |parent_namespace| + try o.namespaceToDebugScope(parent_namespace) + else + file; const name = try o.allocTypeName(ty); defer gpa.free(name); @@ -1974,7 +1970,7 @@ pub const Object = struct { try o.builder.metadataString(name), file, scope, - owner_decl.typeSrcLine(zcu) + 1, // Line + ty.typeDeclSrcLine(zcu).? 
+ 1, // Line try o.lowerDebugType(int_ty), ty.abiSize(pt) * 8, (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8, @@ -2138,14 +2134,18 @@ pub const Object = struct { const name = try o.allocTypeName(ty); defer gpa.free(name); - const owner_decl_index = ty.getOwnerDecl(zcu); - const owner_decl = zcu.declPtr(owner_decl_index); - const file_scope = zcu.namespacePtr(owner_decl.src_namespace).fileScope(zcu); + + const file = try o.getDebugFile(ty.typeDeclInstAllowGeneratedTag(zcu).?.resolveFull(ip).file); + const scope = if (ty.getParentNamespace(zcu).?.unwrap()) |parent_namespace| + try o.namespaceToDebugScope(parent_namespace) + else + file; + const debug_opaque_type = try o.builder.debugStructType( try o.builder.metadataString(name), - try o.getDebugFile(file_scope), - try o.namespaceToDebugScope(owner_decl.src_namespace), - owner_decl.typeSrcLine(zcu) + 1, // Line + file, + scope, + ty.typeDeclSrcLine(zcu).? + 1, // Line .none, // Underlying type 0, // Size 0, // Align @@ -2460,8 +2460,7 @@ pub const Object = struct { // into. Therefore we can satisfy this by making an empty namespace, // rather than changing the frontend to unnecessarily resolve the // struct field types. - const owner_decl_index = ty.getOwnerDecl(zcu); - const debug_struct_type = try o.makeEmptyNamespaceDebugType(owner_decl_index); + const debug_struct_type = try o.makeEmptyNamespaceDebugType(ty); try o.debug_type_map.put(gpa, ty, debug_struct_type); return debug_struct_type; } @@ -2470,8 +2469,7 @@ pub const Object = struct { } if (!ty.hasRuntimeBitsIgnoreComptime(pt)) { - const owner_decl_index = ty.getOwnerDecl(zcu); - const debug_struct_type = try o.makeEmptyNamespaceDebugType(owner_decl_index); + const debug_struct_type = try o.makeEmptyNamespaceDebugType(ty); try o.debug_type_map.put(gpa, ty, debug_struct_type); return debug_struct_type; } @@ -2536,8 +2534,6 @@ pub const Object = struct { return debug_struct_type; }, .Union => { - const owner_decl_index = ty.getOwnerDecl(zcu); - const name = try o.allocTypeName(ty); defer gpa.free(name); @@ -2546,7 +2542,7 @@ pub const Object = struct { !ty.hasRuntimeBitsIgnoreComptime(pt) or !union_type.haveLayout(ip)) { - const debug_union_type = try o.makeEmptyNamespaceDebugType(owner_decl_index); + const debug_union_type = try o.makeEmptyNamespaceDebugType(ty); try o.debug_type_map.put(gpa, ty, debug_union_type); return debug_union_type; } @@ -2762,8 +2758,7 @@ pub const Object = struct { fn namespaceToDebugScope(o: *Object, namespace_index: InternPool.NamespaceIndex) !Builder.Metadata { const zcu = o.pt.zcu; const namespace = zcu.namespacePtr(namespace_index); - const file_scope = namespace.fileScope(zcu); - if (namespace.parent == .none) return try o.getDebugFile(file_scope); + if (namespace.parent == .none) return try o.getDebugFile(namespace.file_scope); const gop = try o.debug_unresolved_namespace_scopes.getOrPut(o.gpa, namespace_index); @@ -2772,15 +2767,19 @@ pub const Object = struct { return gop.value_ptr.*; } - fn makeEmptyNamespaceDebugType(o: *Object, decl_index: InternPool.DeclIndex) !Builder.Metadata { + fn makeEmptyNamespaceDebugType(o: *Object, ty: Type) !Builder.Metadata { const zcu = o.pt.zcu; - const decl = zcu.declPtr(decl_index); - const file_scope = zcu.namespacePtr(decl.src_namespace).fileScope(zcu); + const ip = &zcu.intern_pool; + const file = try o.getDebugFile(ty.typeDeclInstAllowGeneratedTag(zcu).?.resolveFull(ip).file); + const scope = if (ty.getParentNamespace(zcu).?.unwrap()) |parent_namespace| + try o.namespaceToDebugScope(parent_namespace) + else 
+ file; return o.builder.debugStructType( - try o.builder.metadataString(decl.name.toSlice(&zcu.intern_pool)), // TODO use fully qualified name - try o.getDebugFile(file_scope), - try o.namespaceToDebugScope(decl.src_namespace), - decl.typeSrcLine(zcu) + 1, + try o.builder.metadataString(ty.containerTypeName(ip).toSlice(ip)), // TODO use fully qualified name + file, + scope, + ty.typeDeclSrcLine(zcu).? + 1, .none, 0, 0, @@ -2791,25 +2790,24 @@ pub const Object = struct { fn getStackTraceType(o: *Object) Allocator.Error!Type { const pt = o.pt; const zcu = pt.zcu; + const ip = &zcu.intern_pool; const std_mod = zcu.std_mod; const std_file_imported = pt.importPkg(std_mod) catch unreachable; - const builtin_str = try zcu.intern_pool.getOrPutString(zcu.gpa, pt.tid, "builtin", .no_embedded_nulls); - const std_file_root_decl = zcu.fileRootDecl(std_file_imported.file_index); - const std_namespace = zcu.namespacePtr(zcu.declPtr(std_file_root_decl.unwrap().?).src_namespace); - const builtin_decl = std_namespace.decls.getKeyAdapted(builtin_str, Zcu.DeclAdapter{ .zcu = zcu }).?; + const builtin_str = try ip.getOrPutString(zcu.gpa, pt.tid, "builtin", .no_embedded_nulls); + const std_file_root_type = Type.fromInterned(zcu.fileRootType(std_file_imported.file_index)); + const std_namespace = ip.namespacePtr(std_file_root_type.getNamespaceIndex(zcu).unwrap().?); + const builtin_nav = std_namespace.pub_decls.getKeyAdapted(builtin_str, Zcu.Namespace.NameAdapter{ .zcu = zcu }).?; - const stack_trace_str = try zcu.intern_pool.getOrPutString(zcu.gpa, pt.tid, "StackTrace", .no_embedded_nulls); + const stack_trace_str = try ip.getOrPutString(zcu.gpa, pt.tid, "StackTrace", .no_embedded_nulls); // buffer is only used for int_type, `builtin` is a struct. - const builtin_ty = zcu.declPtr(builtin_decl).val.toType(); + const builtin_ty = zcu.navValue(builtin_nav).toType(); const builtin_namespace = zcu.namespacePtrUnwrap(builtin_ty.getNamespaceIndex(zcu)).?; - const stack_trace_decl_index = builtin_namespace.decls.getKeyAdapted(stack_trace_str, Zcu.DeclAdapter{ .zcu = zcu }).?; - const stack_trace_decl = zcu.declPtr(stack_trace_decl_index); + const stack_trace_nav = builtin_namespace.pub_decls.getKeyAdapted(stack_trace_str, Zcu.Namespace.NameAdapter{ .zcu = zcu }).?; // Sema should have ensured that StackTrace was analyzed. - assert(stack_trace_decl.has_tv); - return stack_trace_decl.val.toType(); + return zcu.navValue(stack_trace_nav).toType(); } fn allocTypeName(o: *Object, ty: Type) Allocator.Error![:0]const u8 { @@ -2824,29 +2822,33 @@ pub const Object = struct { /// completed, so if any attributes rely on that, they must be done in updateFunc, not here. 
fn resolveLlvmFunction( o: *Object, - decl_index: InternPool.DeclIndex, + nav_index: InternPool.Nav.Index, ) Allocator.Error!Builder.Function.Index { const pt = o.pt; const zcu = pt.zcu; const ip = &zcu.intern_pool; const gpa = o.gpa; - const decl = zcu.declPtr(decl_index); - const namespace = zcu.namespacePtr(decl.src_namespace); - const owner_mod = namespace.fileScope(zcu).mod; - const zig_fn_type = decl.typeOf(zcu); - const gop = try o.decl_map.getOrPut(gpa, decl_index); + const nav = ip.getNav(nav_index); + const owner_mod = zcu.navFileScope(nav_index).mod; + const resolved = nav.status.resolved; + const val = Value.fromInterned(resolved.val); + const ty = val.typeOf(zcu); + const gop = try o.nav_map.getOrPut(gpa, nav_index); if (gop.found_existing) return gop.value_ptr.ptr(&o.builder).kind.function; - assert(decl.has_tv); - const fn_info = zcu.typeToFunc(zig_fn_type).?; + const fn_info = zcu.typeToFunc(ty).?; const target = owner_mod.resolved_target.result; const sret = firstParamSRet(fn_info, pt, target); - const is_extern = decl.isExtern(zcu); + const is_extern, const lib_name = switch (ip.indexToKey(val.toIntern())) { + .variable => |variable| .{ false, variable.lib_name }, + .@"extern" => |@"extern"| .{ true, @"extern".lib_name }, + else => .{ false, .none }, + }; const function_index = try o.builder.addFunction( - try o.lowerType(zig_fn_type), - try o.builder.strtabString((if (is_extern) decl.name else decl.fqn).toSlice(ip)), - toLlvmAddressSpace(decl.@"addrspace", target), + try o.lowerType(ty), + try o.builder.strtabString((if (is_extern) nav.name else nav.fqn).toSlice(ip)), + toLlvmAddressSpace(resolved.@"addrspace", target), ); gop.value_ptr.* = function_index.ptrConst(&o.builder).global; @@ -2860,12 +2862,12 @@ pub const Object = struct { if (target.isWasm()) { try attributes.addFnAttr(.{ .string = .{ .kind = try o.builder.string("wasm-import-name"), - .value = try o.builder.string(decl.name.toSlice(ip)), + .value = try o.builder.string(nav.name.toSlice(ip)), } }, &o.builder); - if (decl.getOwnedExternFunc(zcu).?.lib_name.toSlice(ip)) |lib_name| { - if (!std.mem.eql(u8, lib_name, "c")) try attributes.addFnAttr(.{ .string = .{ + if (lib_name.toSlice(ip)) |lib_name_slice| { + if (!std.mem.eql(u8, lib_name_slice, "c")) try attributes.addFnAttr(.{ .string = .{ .kind = try o.builder.string("wasm-import-module"), - .value = try o.builder.string(lib_name), + .value = try o.builder.string(lib_name_slice), } }, &o.builder); } } @@ -2901,8 +2903,8 @@ pub const Object = struct { else => function_index.setCallConv(toLlvmCallConv(fn_info.cc, target), &o.builder), } - if (decl.alignment != .none) - function_index.setAlignment(decl.alignment.toLlvm(), &o.builder); + if (resolved.alignment != .none) + function_index.setAlignment(resolved.alignment.toLlvm(), &o.builder); // Function attributes that are independent of analysis results of the function body. try o.addCommonFnAttributes(&attributes, owner_mod); @@ -3006,15 +3008,15 @@ pub const Object = struct { } } - fn resolveGlobalAnonDecl( + fn resolveGlobalUav( o: *Object, - decl_val: InternPool.Index, + uav: InternPool.Index, llvm_addr_space: Builder.AddrSpace, alignment: InternPool.Alignment, ) Error!Builder.Variable.Index { assert(alignment != .none); // TODO: Add address space to the anon_decl_map - const gop = try o.anon_decl_map.getOrPut(o.gpa, decl_val); + const gop = try o.uav_map.getOrPut(o.gpa, uav); if (gop.found_existing) { // Keep the greater of the two alignments. 
const variable_index = gop.value_ptr.ptr(&o.builder).kind.variable; @@ -3023,19 +3025,19 @@ pub const Object = struct { variable_index.setAlignment(max_alignment.toLlvm(), &o.builder); return variable_index; } - errdefer assert(o.anon_decl_map.remove(decl_val)); + errdefer assert(o.uav_map.remove(uav)); const mod = o.pt.zcu; - const decl_ty = mod.intern_pool.typeOf(decl_val); + const decl_ty = mod.intern_pool.typeOf(uav); const variable_index = try o.builder.addVariable( - try o.builder.strtabStringFmt("__anon_{d}", .{@intFromEnum(decl_val)}), + try o.builder.strtabStringFmt("__anon_{d}", .{@intFromEnum(uav)}), try o.lowerType(Type.fromInterned(decl_ty)), llvm_addr_space, ); gop.value_ptr.* = variable_index.ptrConst(&o.builder).global; - try variable_index.setInitializer(try o.lowerValue(decl_val), &o.builder); + try variable_index.setInitializer(try o.lowerValue(uav), &o.builder); variable_index.setLinkage(.internal, &o.builder); variable_index.setMutability(.constant, &o.builder); variable_index.setUnnamedAddr(.unnamed_addr, &o.builder); @@ -3043,24 +3045,29 @@ pub const Object = struct { return variable_index; } - fn resolveGlobalDecl( + fn resolveGlobalNav( o: *Object, - decl_index: InternPool.DeclIndex, + nav_index: InternPool.Nav.Index, ) Allocator.Error!Builder.Variable.Index { - const gop = try o.decl_map.getOrPut(o.gpa, decl_index); + const gop = try o.nav_map.getOrPut(o.gpa, nav_index); if (gop.found_existing) return gop.value_ptr.ptr(&o.builder).kind.variable; - errdefer assert(o.decl_map.remove(decl_index)); + errdefer assert(o.nav_map.remove(nav_index)); const pt = o.pt; const zcu = pt.zcu; const ip = &zcu.intern_pool; - const decl = zcu.declPtr(decl_index); - const is_extern = decl.isExtern(zcu); + const nav = ip.getNav(nav_index); + const resolved = nav.status.resolved; + const is_extern, const is_threadlocal, const is_weak_linkage = switch (ip.indexToKey(resolved.val)) { + .variable => |variable| .{ false, variable.is_threadlocal, variable.is_weak_linkage }, + .@"extern" => |@"extern"| .{ true, @"extern".is_threadlocal, @"extern".is_weak_linkage }, + else => .{ false, false, false }, + }; const variable_index = try o.builder.addVariable( - try o.builder.strtabString((if (is_extern) decl.name else decl.fqn).toSlice(ip)), - try o.lowerType(decl.typeOf(zcu)), - toLlvmGlobalAddressSpace(decl.@"addrspace", zcu.getTarget()), + try o.builder.strtabString((if (is_extern) nav.name else nav.fqn).toSlice(ip)), + try o.lowerType(Type.fromInterned(nav.typeOf(ip))), + toLlvmGlobalAddressSpace(resolved.@"addrspace", zcu.getTarget()), ); gop.value_ptr.* = variable_index.ptrConst(&o.builder).global; @@ -3068,15 +3075,9 @@ pub const Object = struct { if (is_extern) { variable_index.setLinkage(.external, &o.builder); variable_index.setUnnamedAddr(.default, &o.builder); - if (decl.val.getVariable(zcu)) |decl_var| { - const decl_namespace = zcu.namespacePtr(decl.src_namespace); - const single_threaded = decl_namespace.fileScope(zcu).mod.single_threaded; - variable_index.setThreadLocal( - if (decl_var.is_threadlocal and !single_threaded) .generaldynamic else .default, - &o.builder, - ); - if (decl_var.is_weak_linkage) variable_index.setLinkage(.extern_weak, &o.builder); - } + if (is_threadlocal and !zcu.navFileScope(nav_index).mod.single_threaded) + variable_index.setThreadLocal(.generaldynamic, &o.builder); + if (is_weak_linkage) variable_index.setLinkage(.extern_weak, &o.builder); } else { variable_index.setLinkage(.internal, &o.builder); variable_index.setUnnamedAddr(.unnamed_addr, 
&o.builder); @@ -3286,8 +3287,6 @@ pub const Object = struct { return int_ty; } - const decl = mod.declPtr(struct_type.decl.unwrap().?); - var llvm_field_types = std.ArrayListUnmanaged(Builder.Type){}; defer llvm_field_types.deinit(o.gpa); // Although we can estimate how much capacity to add, these cannot be @@ -3351,7 +3350,7 @@ pub const Object = struct { ); } - const ty = try o.builder.opaqueType(try o.builder.string(decl.fqn.toSlice(ip))); + const ty = try o.builder.opaqueType(try o.builder.string(t.containerTypeName(ip).toSlice(ip))); try o.type_map.put(o.gpa, t.toIntern(), ty); o.builder.namedTypeSetBody( @@ -3440,8 +3439,6 @@ pub const Object = struct { return enum_tag_ty; } - const decl = mod.declPtr(union_obj.decl); - const aligned_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[layout.most_aligned_field]); const aligned_field_llvm_ty = try o.lowerType(aligned_field_ty); @@ -3460,7 +3457,7 @@ pub const Object = struct { }; if (layout.tag_size == 0) { - const ty = try o.builder.opaqueType(try o.builder.string(decl.fqn.toSlice(ip))); + const ty = try o.builder.opaqueType(try o.builder.string(t.containerTypeName(ip).toSlice(ip))); try o.type_map.put(o.gpa, t.toIntern(), ty); o.builder.namedTypeSetBody( @@ -3488,7 +3485,7 @@ pub const Object = struct { llvm_fields_len += 1; } - const ty = try o.builder.opaqueType(try o.builder.string(decl.fqn.toSlice(ip))); + const ty = try o.builder.opaqueType(try o.builder.string(t.containerTypeName(ip).toSlice(ip))); try o.type_map.put(o.gpa, t.toIntern(), ty); o.builder.namedTypeSetBody( @@ -3500,8 +3497,7 @@ pub const Object = struct { .opaque_type => { const gop = try o.type_map.getOrPut(o.gpa, t.toIntern()); if (!gop.found_existing) { - const decl = mod.declPtr(ip.loadOpaqueType(t.toIntern()).decl); - gop.value_ptr.* = try o.builder.opaqueType(try o.builder.string(decl.fqn.toSlice(ip))); + gop.value_ptr.* = try o.builder.opaqueType(try o.builder.string(t.containerTypeName(ip).toSlice(ip))); } return gop.value_ptr.*; }, @@ -3512,7 +3508,7 @@ pub const Object = struct { .undef, .simple_value, .variable, - .extern_func, + .@"extern", .func, .int, .err, @@ -3632,15 +3628,13 @@ pub const Object = struct { const ty = Type.fromInterned(val_key.typeOf()); switch (val_key) { - .extern_func => |extern_func| { - const fn_decl_index = extern_func.decl; - const function_index = try o.resolveLlvmFunction(fn_decl_index); + .@"extern" => |@"extern"| { + const function_index = try o.resolveLlvmFunction(@"extern".owner_nav); const ptr = function_index.ptrConst(&o.builder).global.toConst(); return o.builder.convConst(ptr, llvm_int_ty); }, .func => |func| { - const fn_decl_index = func.owner_decl; - const function_index = try o.resolveLlvmFunction(fn_decl_index); + const function_index = try o.resolveLlvmFunction(func.owner_nav); const ptr = function_index.ptrConst(&o.builder).global.toConst(); return o.builder.convConst(ptr, llvm_int_ty); }, @@ -3783,14 +3777,12 @@ pub const Object = struct { .enum_literal, .empty_enum_value, => unreachable, // non-runtime values - .extern_func => |extern_func| { - const fn_decl_index = extern_func.decl; - const function_index = try o.resolveLlvmFunction(fn_decl_index); + .@"extern" => |@"extern"| { + const function_index = try o.resolveLlvmFunction(@"extern".owner_nav); return function_index.ptrConst(&o.builder).global.toConst(); }, .func => |func| { - const fn_decl_index = func.owner_decl; - const function_index = try o.resolveLlvmFunction(fn_decl_index); + const function_index = try 
o.resolveLlvmFunction(func.owner_nav); return function_index.ptrConst(&o.builder).global.toConst(); }, .int => { @@ -4284,14 +4276,14 @@ pub const Object = struct { const ptr = zcu.intern_pool.indexToKey(ptr_val).ptr; const offset: u64 = prev_offset + ptr.byte_offset; return switch (ptr.base_addr) { - .decl => |decl| { - const base_ptr = try o.lowerDeclRefValue(decl); + .nav => |nav| { + const base_ptr = try o.lowerNavRefValue(nav); return o.builder.gepConst(.inbounds, .i8, base_ptr, null, &.{ try o.builder.intConst(.i64, offset), }); }, - .anon_decl => |ad| { - const base_ptr = try o.lowerAnonDeclRef(ad); + .uav => |uav| { + const base_ptr = try o.lowerUavRef(uav); return o.builder.gepConst(.inbounds, .i8, base_ptr, null, &.{ try o.builder.intConst(.i64, offset), }); @@ -4332,39 +4324,37 @@ pub const Object = struct { }; } - /// This logic is very similar to `lowerDeclRefValue` but for anonymous declarations. + /// This logic is very similar to `lowerNavRefValue` but for anonymous declarations. /// Maybe the logic could be unified. - fn lowerAnonDeclRef( + fn lowerUavRef( o: *Object, - anon_decl: InternPool.Key.Ptr.BaseAddr.AnonDecl, + uav: InternPool.Key.Ptr.BaseAddr.Uav, ) Error!Builder.Constant { const pt = o.pt; const mod = pt.zcu; const ip = &mod.intern_pool; - const decl_val = anon_decl.val; - const decl_ty = Type.fromInterned(ip.typeOf(decl_val)); + const uav_val = uav.val; + const uav_ty = Type.fromInterned(ip.typeOf(uav_val)); const target = mod.getTarget(); - if (Value.fromInterned(decl_val).getFunction(mod)) |func| { - _ = func; - @panic("TODO"); - } else if (Value.fromInterned(decl_val).getExternFunc(mod)) |func| { - _ = func; - @panic("TODO"); + switch (ip.indexToKey(uav_val)) { + .func => @panic("TODO"), + .@"extern" => @panic("TODO"), + else => {}, } - const ptr_ty = Type.fromInterned(anon_decl.orig_ty); + const ptr_ty = Type.fromInterned(uav.orig_ty); - const is_fn_body = decl_ty.zigTypeTag(mod) == .Fn; - if ((!is_fn_body and !decl_ty.hasRuntimeBits(pt)) or - (is_fn_body and mod.typeToFunc(decl_ty).?.is_generic)) return o.lowerPtrToVoid(ptr_ty); + const is_fn_body = uav_ty.zigTypeTag(mod) == .Fn; + if ((!is_fn_body and !uav_ty.hasRuntimeBits(pt)) or + (is_fn_body and mod.typeToFunc(uav_ty).?.is_generic)) return o.lowerPtrToVoid(ptr_ty); if (is_fn_body) @panic("TODO"); const llvm_addr_space = toLlvmAddressSpace(ptr_ty.ptrAddressSpace(mod), target); const alignment = ptr_ty.ptrAlignment(pt); - const llvm_global = (try o.resolveGlobalAnonDecl(decl_val, llvm_addr_space, alignment)).ptrConst(&o.builder).global; + const llvm_global = (try o.resolveGlobalUav(uav.val, llvm_addr_space, alignment)).ptrConst(&o.builder).global; const llvm_val = try o.builder.convConst( llvm_global.toConst(), @@ -4374,44 +4364,41 @@ pub const Object = struct { return o.builder.convConst(llvm_val, try o.lowerType(ptr_ty)); } - fn lowerDeclRefValue(o: *Object, decl_index: InternPool.DeclIndex) Allocator.Error!Builder.Constant { + fn lowerNavRefValue(o: *Object, nav_index: InternPool.Nav.Index) Allocator.Error!Builder.Constant { const pt = o.pt; - const mod = pt.zcu; + const zcu = pt.zcu; + const ip = &zcu.intern_pool; // In the case of something like: // fn foo() void {} // const bar = foo; // ... &bar; // `bar` is just an alias and we actually want to lower a reference to `foo`. 
- const decl = mod.declPtr(decl_index); - if (decl.val.getFunction(mod)) |func| { - if (func.owner_decl != decl_index) { - return o.lowerDeclRefValue(func.owner_decl); - } - } else if (decl.val.getExternFunc(mod)) |func| { - if (func.decl != decl_index) { - return o.lowerDeclRefValue(func.decl); - } - } + const owner_nav_index = switch (ip.indexToKey(zcu.navValue(nav_index).toIntern())) { + .func => |func| func.owner_nav, + .@"extern" => |@"extern"| @"extern".owner_nav, + else => nav_index, + }; + const owner_nav = ip.getNav(owner_nav_index); - const decl_ty = decl.typeOf(mod); - const ptr_ty = try decl.declPtrType(pt); + const nav_ty = Type.fromInterned(owner_nav.typeOf(ip)); + const ptr_ty = try pt.navPtrType(owner_nav_index); - const is_fn_body = decl_ty.zigTypeTag(mod) == .Fn; - if ((!is_fn_body and !decl_ty.hasRuntimeBits(pt)) or - (is_fn_body and mod.typeToFunc(decl_ty).?.is_generic)) + const is_fn_body = nav_ty.zigTypeTag(zcu) == .Fn; + if ((!is_fn_body and !nav_ty.hasRuntimeBits(pt)) or + (is_fn_body and zcu.typeToFunc(nav_ty).?.is_generic)) { return o.lowerPtrToVoid(ptr_ty); } const llvm_global = if (is_fn_body) - (try o.resolveLlvmFunction(decl_index)).ptrConst(&o.builder).global + (try o.resolveLlvmFunction(owner_nav_index)).ptrConst(&o.builder).global else - (try o.resolveGlobalDecl(decl_index)).ptrConst(&o.builder).global; + (try o.resolveGlobalNav(owner_nav_index)).ptrConst(&o.builder).global; const llvm_val = try o.builder.convConst( llvm_global.toConst(), - try o.builder.ptrType(toLlvmAddressSpace(decl.@"addrspace", mod.getTarget())), + try o.builder.ptrType(toLlvmAddressSpace(owner_nav.status.resolved.@"addrspace", zcu.getTarget())), ); return o.builder.convConst(llvm_val, try o.lowerType(ptr_ty)); @@ -4553,18 +4540,16 @@ pub const Object = struct { const ip = &zcu.intern_pool; const enum_type = ip.loadEnumType(enum_ty.toIntern()); - // TODO: detect when the type changes and re-emit this function. 
- const gop = try o.decl_map.getOrPut(o.gpa, enum_type.decl); + const gop = try o.enum_tag_name_map.getOrPut(o.gpa, enum_ty.toIntern()); if (gop.found_existing) return gop.value_ptr.ptrConst(&o.builder).kind.function; - errdefer assert(o.decl_map.remove(enum_type.decl)); + errdefer assert(o.enum_tag_name_map.remove(enum_ty.toIntern())); const usize_ty = try o.lowerType(Type.usize); const ret_ty = try o.lowerType(Type.slice_const_u8_sentinel_0); - const decl = zcu.declPtr(enum_type.decl); const target = zcu.root_mod.resolved_target.result; const function_index = try o.builder.addFunction( try o.builder.fnType(ret_ty, &.{try o.lowerType(Type.fromInterned(enum_type.tag_ty))}, .normal), - try o.builder.strtabStringFmt("__zig_tag_name_{}", .{decl.fqn.fmt(ip)}), + try o.builder.strtabStringFmt("__zig_tag_name_{}", .{enum_type.name.fmt(ip)}), toLlvmAddressSpace(.generic, target), ); @@ -4624,86 +4609,73 @@ pub const Object = struct { } }; -pub const DeclGen = struct { +pub const NavGen = struct { object: *Object, - decl: *Zcu.Decl, - decl_index: InternPool.DeclIndex, + nav_index: InternPool.Nav.Index, err_msg: ?*Zcu.ErrorMsg, - fn ownerModule(dg: DeclGen) *Package.Module { - const o = dg.object; - const zcu = o.pt.zcu; - const namespace = zcu.namespacePtr(dg.decl.src_namespace); - const file_scope = namespace.fileScope(zcu); - return file_scope.mod; + fn ownerModule(ng: NavGen) *Package.Module { + return ng.object.pt.zcu.navFileScope(ng.nav_index).mod; } - fn todo(dg: *DeclGen, comptime format: []const u8, args: anytype) Error { + fn todo(ng: *NavGen, comptime format: []const u8, args: anytype) Error { @setCold(true); - assert(dg.err_msg == null); - const o = dg.object; + assert(ng.err_msg == null); + const o = ng.object; const gpa = o.gpa; - const src_loc = dg.decl.navSrcLoc(o.pt.zcu); - dg.err_msg = try Zcu.ErrorMsg.create(gpa, src_loc, "TODO (LLVM): " ++ format, args); + const src_loc = o.pt.zcu.navSrcLoc(ng.nav_index); + ng.err_msg = try Zcu.ErrorMsg.create(gpa, src_loc, "TODO (LLVM): " ++ format, args); return error.CodegenFail; } - fn genDecl(dg: *DeclGen) !void { - const o = dg.object; + fn genDecl(ng: *NavGen) !void { + const o = ng.object; const pt = o.pt; const zcu = pt.zcu; const ip = &zcu.intern_pool; - const decl = dg.decl; - const decl_index = dg.decl_index; - assert(decl.has_tv); + const nav_index = ng.nav_index; + const nav = ip.getNav(nav_index); + const resolved = nav.status.resolved; + + const is_extern, const lib_name, const is_threadlocal, const is_weak_linkage, const is_const, const init_val, const owner_nav = switch (ip.indexToKey(resolved.val)) { + .variable => |variable| .{ false, variable.lib_name, variable.is_threadlocal, variable.is_weak_linkage, false, variable.init, variable.owner_nav }, + .@"extern" => |@"extern"| .{ true, @"extern".lib_name, @"extern".is_threadlocal, @"extern".is_weak_linkage, @"extern".is_const, .none, @"extern".owner_nav }, + else => .{ false, .none, false, false, true, resolved.val, nav_index }, + }; + const ty = Type.fromInterned(nav.typeOf(ip)); - if (decl.val.getExternFunc(zcu)) |extern_func| { - _ = try o.resolveLlvmFunction(extern_func.decl); + if (is_extern and ip.isFunctionType(ty.toIntern())) { + _ = try o.resolveLlvmFunction(owner_nav); } else { - const variable_index = try o.resolveGlobalDecl(decl_index); - variable_index.setAlignment( - decl.getAlignment(pt).toLlvm(), - &o.builder, - ); - if (decl.@"linksection".toSlice(ip)) |section| + const variable_index = try o.resolveGlobalNav(nav_index); + 
variable_index.setAlignment(pt.navAlignment(nav_index).toLlvm(), &o.builder); + if (resolved.@"linksection".toSlice(ip)) |section| variable_index.setSection(try o.builder.string(section), &o.builder); - assert(decl.has_tv); - const init_val = if (decl.val.getVariable(zcu)) |decl_var| decl_var.init else init_val: { - variable_index.setMutability(.constant, &o.builder); - break :init_val decl.val.toIntern(); - }; + if (is_const) variable_index.setMutability(.constant, &o.builder); try variable_index.setInitializer(switch (init_val) { .none => .no_init, else => try o.lowerValue(init_val), }, &o.builder); - if (decl.val.getVariable(zcu)) |decl_var| { - const decl_namespace = zcu.namespacePtr(decl.src_namespace); - const single_threaded = decl_namespace.fileScope(zcu).mod.single_threaded; - variable_index.setThreadLocal( - if (decl_var.is_threadlocal and !single_threaded) .generaldynamic else .default, - &o.builder, - ); - } - - const line_number = decl.navSrcLine(zcu) + 1; + const file_scope = zcu.navFileScopeIndex(nav_index); + const mod = zcu.fileByIndex(file_scope).mod; + if (is_threadlocal and !mod.single_threaded) + variable_index.setThreadLocal(.generaldynamic, &o.builder); - const namespace = zcu.namespacePtr(decl.src_namespace); - const file_scope = namespace.fileScope(zcu); - const owner_mod = file_scope.mod; + const line_number = zcu.navSrcLine(nav_index) + 1; - if (!owner_mod.strip) { + if (!mod.strip) { const debug_file = try o.getDebugFile(file_scope); const debug_global_var = try o.builder.debugGlobalVar( - try o.builder.metadataString(decl.name.toSlice(ip)), // Name + try o.builder.metadataString(nav.name.toSlice(ip)), // Name try o.builder.metadataStringFromStrtabString(variable_index.name(&o.builder)), // Linkage name debug_file, // File debug_file, // Scope line_number, - try o.lowerDebugType(decl.typeOf(zcu)), + try o.lowerDebugType(ty), variable_index, - .{ .local = !decl.isExtern(zcu) }, + .{ .local = !is_extern }, ); const debug_expression = try o.builder.debugExpression(&.{}); @@ -4718,18 +4690,18 @@ pub const DeclGen = struct { } } - if (decl.isExtern(zcu)) { - const global_index = o.decl_map.get(decl_index).?; + if (is_extern) { + const global_index = o.nav_map.get(nav_index).?; const decl_name = decl_name: { - if (zcu.getTarget().isWasm() and decl.typeOf(zcu).zigTypeTag(zcu) == .Fn) { - if (decl.getOwnedExternFunc(zcu).?.lib_name.toSlice(ip)) |lib_name| { - if (!std.mem.eql(u8, lib_name, "c")) { - break :decl_name try o.builder.strtabStringFmt("{}|{s}", .{ decl.name.fmt(ip), lib_name }); + if (zcu.getTarget().isWasm() and ty.zigTypeTag(zcu) == .Fn) { + if (lib_name.toSlice(ip)) |lib_name_slice| { + if (!std.mem.eql(u8, lib_name_slice, "c")) { + break :decl_name try o.builder.strtabStringFmt("{}|{s}", .{ nav.name.fmt(ip), lib_name_slice }); } } } - break :decl_name try o.builder.strtabString(decl.name.toSlice(ip)); + break :decl_name try o.builder.strtabString(nav.name.toSlice(ip)); }; if (o.builder.getGlobal(decl_name)) |other_global| { @@ -4746,16 +4718,14 @@ pub const DeclGen = struct { if (zcu.comp.config.dll_export_fns) global_index.setDllStorageClass(.default, &o.builder); - if (decl.val.getVariable(zcu)) |decl_var| { - if (decl_var.is_weak_linkage) global_index.setLinkage(.extern_weak, &o.builder); - } + if (is_weak_linkage) global_index.setLinkage(.extern_weak, &o.builder); } } }; pub const FuncGen = struct { gpa: Allocator, - dg: *DeclGen, + ng: *NavGen, air: Air, liveness: Liveness, wip: Builder.WipFunction, @@ -4815,7 +4785,7 @@ pub const FuncGen = struct { 
fn todo(self: *FuncGen, comptime format: []const u8, args: anytype) Error { @setCold(true); - return self.dg.todo(format, args); + return self.ng.todo(format, args); } fn resolveInst(self: *FuncGen, inst: Air.Inst.Ref) !Builder.Value { @@ -4823,13 +4793,13 @@ pub const FuncGen = struct { const gop = try self.func_inst_table.getOrPut(gpa, inst); if (gop.found_existing) return gop.value_ptr.*; - const llvm_val = try self.resolveValue((try self.air.value(inst, self.dg.object.pt)).?); + const llvm_val = try self.resolveValue((try self.air.value(inst, self.ng.object.pt)).?); gop.value_ptr.* = llvm_val.toValue(); return llvm_val.toValue(); } fn resolveValue(self: *FuncGen, val: Value) Error!Builder.Constant { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const ty = val.typeOf(pt.zcu); const llvm_val = try o.lowerValue(val.toIntern()); @@ -4855,7 +4825,7 @@ pub const FuncGen = struct { } fn resolveNullOptUsize(self: *FuncGen) Error!Builder.Constant { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; if (o.null_opt_usize == .no_init) { o.null_opt_usize = try self.resolveValue(Value.fromInterned(try pt.intern(.{ .opt = .{ @@ -4867,7 +4837,7 @@ pub const FuncGen = struct { } fn genBody(self: *FuncGen, body: []const Air.Inst.Index) Error!void { - const o = self.dg.object; + const o = self.ng.object; const mod = o.pt.zcu; const ip = &mod.intern_pool; const air_tags = self.air.instructions.items(.tag); @@ -5132,20 +5102,19 @@ pub const FuncGen = struct { defer self.scope = old_scope; if (maybe_inline_func) |inline_func| { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const zcu = pt.zcu; + const ip = &zcu.intern_pool; const func = zcu.funcInfo(inline_func); - const decl_index = func.owner_decl; - const decl = zcu.declPtr(decl_index); - const namespace = zcu.namespacePtr(decl.src_namespace); - const file_scope = namespace.fileScope(zcu); - const owner_mod = file_scope.mod; + const nav = ip.getNav(func.owner_nav); + const file_scope = zcu.navFileScopeIndex(func.owner_nav); + const mod = zcu.fileByIndex(file_scope).mod; self.file = try o.getDebugFile(file_scope); - const line_number = decl.navSrcLine(zcu) + 1; + const line_number = zcu.navSrcLine(func.owner_nav) + 1; self.inlined = self.wip.debug_location; const fn_ty = try pt.funcType(.{ @@ -5155,15 +5124,15 @@ pub const FuncGen = struct { self.scope = try o.builder.debugSubprogram( self.file, - try o.builder.metadataString(decl.name.toSlice(&zcu.intern_pool)), - try o.builder.metadataString(decl.fqn.toSlice(&zcu.intern_pool)), + try o.builder.metadataString(nav.name.toSlice(&zcu.intern_pool)), + try o.builder.metadataString(nav.fqn.toSlice(&zcu.intern_pool)), line_number, line_number + func.lbrace_line, try o.lowerDebugType(fn_ty), .{ .di_flags = .{ .StaticMember = true }, .sp_flags = .{ - .Optimized = owner_mod.optimize_mode != .Debug, + .Optimized = mod.optimize_mode != .Debug, .Definition = true, .LocalToUnit = true, // TODO: we can't know this at this point, since the function could be exported later! 
}, @@ -5171,7 +5140,7 @@ pub const FuncGen = struct { o.debug_compile_unit, ); - self.base_line = decl.navSrcLine(zcu); + self.base_line = zcu.navSrcLine(func.owner_nav); const inlined_at_location = try self.wip.debug_location.toMetadata(&o.builder); self.wip.debug_location = .{ .location = .{ @@ -5183,7 +5152,7 @@ pub const FuncGen = struct { }; } - self.scope = try self.dg.object.builder.debugLexicalBlock( + self.scope = try self.ng.object.builder.debugLexicalBlock( self.scope, self.file, self.prev_dbg_line, @@ -5214,7 +5183,7 @@ pub const FuncGen = struct { const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const extra = self.air.extraData(Air.Call, pl_op.payload); const args: []const Air.Inst.Ref = @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len]); - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const ip = &mod.intern_pool; @@ -5515,14 +5484,15 @@ pub const FuncGen = struct { } fn buildSimplePanic(fg: *FuncGen, panic_id: Zcu.PanicId) !void { - const o = fg.dg.object; - const mod = o.pt.zcu; - const msg_decl_index = mod.panic_messages[@intFromEnum(panic_id)].unwrap().?; - const msg_decl = mod.declPtr(msg_decl_index); - const msg_len = msg_decl.typeOf(mod).childType(mod).arrayLen(mod); - const msg_ptr = try o.lowerValue(msg_decl.val.toIntern()); + const o = fg.ng.object; + const zcu = o.pt.zcu; + const ip = &zcu.intern_pool; + const msg_nav_index = zcu.panic_messages[@intFromEnum(panic_id)].unwrap().?; + const msg_nav = ip.getNav(msg_nav_index); + const msg_len = Type.fromInterned(msg_nav.typeOf(ip)).childType(zcu).arrayLen(zcu); + const msg_ptr = try o.lowerValue(msg_nav.status.resolved.val); const null_opt_addr_global = try fg.resolveNullOptUsize(); - const target = mod.getTarget(); + const target = zcu.getTarget(); const llvm_usize = try o.lowerType(Type.usize); // example: // call fastcc void @test2.panic( @@ -5531,10 +5501,10 @@ pub const FuncGen = struct { // ptr null, ; stack trace // ptr @2, ; addr (null ?usize) // ) - const panic_func = mod.funcInfo(mod.panic_func_index); - const panic_decl = mod.declPtr(panic_func.owner_decl); - const fn_info = mod.typeToFunc(panic_decl.typeOf(mod)).?; - const panic_global = try o.resolveLlvmFunction(panic_func.owner_decl); + const panic_func = zcu.funcInfo(zcu.panic_func_index); + const panic_nav = ip.getNav(panic_func.owner_nav); + const fn_info = zcu.typeToFunc(Type.fromInterned(panic_nav.typeOf(ip))).?; + const panic_global = try o.resolveLlvmFunction(panic_func.owner_nav); _ = try fg.wip.call( .normal, toLlvmCallConv(fn_info.cc, target), @@ -5553,9 +5523,10 @@ pub const FuncGen = struct { } fn airRet(self: *FuncGen, inst: Air.Inst.Index, safety: bool) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; + const ip = &mod.intern_pool; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const ret_ty = self.typeOf(un_op); @@ -5581,7 +5552,7 @@ pub const FuncGen = struct { len, if (ptr_ty.isVolatilePtr(mod)) .@"volatile" else .normal, ); - const owner_mod = self.dg.ownerModule(); + const owner_mod = self.ng.ownerModule(); if (owner_mod.valgrind) { try self.valgrindMarkUndef(self.ret_ptr, len); } @@ -5602,7 +5573,7 @@ pub const FuncGen = struct { _ = try self.wip.retVoid(); return .none; } - const fn_info = mod.typeToFunc(self.dg.decl.typeOf(mod)).?; + const fn_info = mod.typeToFunc(Type.fromInterned(ip.getNav(self.ng.nav_index).typeOf(ip))).?; if 
(!ret_ty.hasRuntimeBitsIgnoreComptime(pt)) { if (Type.fromInterned(fn_info.return_type).isError(mod)) { // Functions with an empty error set are emitted with an error code @@ -5631,7 +5602,7 @@ pub const FuncGen = struct { len, .normal, ); - const owner_mod = self.dg.ownerModule(); + const owner_mod = self.ng.ownerModule(); if (owner_mod.valgrind) { try self.valgrindMarkUndef(rp, len); } @@ -5659,13 +5630,14 @@ pub const FuncGen = struct { } fn airRetLoad(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; + const ip = &mod.intern_pool; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const ptr_ty = self.typeOf(un_op); const ret_ty = ptr_ty.childType(mod); - const fn_info = mod.typeToFunc(self.dg.decl.typeOf(mod)).?; + const fn_info = mod.typeToFunc(Type.fromInterned(ip.getNav(self.ng.nav_index).typeOf(ip))).?; if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt)) { if (Type.fromInterned(fn_info.return_type).isError(mod)) { // Functions with an empty error set are emitted with an error code @@ -5689,7 +5661,7 @@ pub const FuncGen = struct { } fn airCVaArg(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const list = try self.resolveInst(ty_op.operand); const arg_ty = ty_op.ty.toType(); @@ -5699,7 +5671,7 @@ pub const FuncGen = struct { } fn airCVaCopy(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const src_list = try self.resolveInst(ty_op.operand); @@ -5725,7 +5697,7 @@ pub const FuncGen = struct { } fn airCVaStart(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const va_list_ty = self.typeOfIndex(inst); const llvm_va_list_ty = try o.lowerType(va_list_ty); @@ -5767,7 +5739,7 @@ pub const FuncGen = struct { } fn airCmpLtErrorsLen(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const operand = try self.resolveInst(un_op); const llvm_fn = try o.getCmpLtErrorsLenFunction(); @@ -5790,7 +5762,7 @@ pub const FuncGen = struct { lhs: Builder.Value, rhs: Builder.Value, ) Allocator.Error!Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const scalar_ty = operand_ty.scalarType(mod); @@ -5897,7 +5869,7 @@ pub const FuncGen = struct { maybe_inline_func: ?InternPool.Index, body: []const Air.Inst.Index, ) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const inst_ty = self.typeOfIndex(inst); @@ -5948,7 +5920,7 @@ pub const FuncGen = struct { } fn airBr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const branch = self.air.instructions.items(.data)[@intFromEnum(inst)].br; const block = self.blocks.get(branch.block_inst).?; @@ -5988,7 +5960,7 @@ pub const FuncGen = struct { } fn airTry(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const inst = body_tail[0]; const pl_op = 
self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; @@ -6003,7 +5975,7 @@ pub const FuncGen = struct { } fn airTryPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const mod = o.pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.TryPtr, ty_pl.payload); @@ -6023,7 +5995,7 @@ pub const FuncGen = struct { can_elide_load: bool, is_unused: bool, ) !Builder.Value { - const o = fg.dg.object; + const o = fg.ng.object; const pt = o.pt; const mod = pt.zcu; const payload_ty = err_union_ty.errorUnionPayload(mod); @@ -6088,7 +6060,7 @@ pub const FuncGen = struct { } fn airSwitchBr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const cond = try self.resolveInst(pl_op.operand); const switch_br = self.air.extraData(Air.SwitchBr, pl_op.payload); @@ -6152,7 +6124,7 @@ pub const FuncGen = struct { } fn airLoop(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const mod = o.pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const loop = self.air.extraData(Air.Block, ty_pl.payload); @@ -6176,7 +6148,7 @@ pub const FuncGen = struct { } fn airArrayToSlice(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; @@ -6195,7 +6167,7 @@ pub const FuncGen = struct { } fn airFloatFromInt(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; @@ -6280,7 +6252,7 @@ pub const FuncGen = struct { ) !Builder.Value { _ = fast; - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const target = mod.getTarget(); @@ -6342,13 +6314,13 @@ pub const FuncGen = struct { } fn sliceOrArrayPtr(fg: *FuncGen, ptr: Builder.Value, ty: Type) Allocator.Error!Builder.Value { - const o = fg.dg.object; + const o = fg.ng.object; const mod = o.pt.zcu; return if (ty.isSlice(mod)) fg.wip.extractValue(ptr, &.{0}, "") else ptr; } fn sliceOrArrayLenInBytes(fg: *FuncGen, ptr: Builder.Value, ty: Type) Allocator.Error!Builder.Value { - const o = fg.dg.object; + const o = fg.ng.object; const pt = o.pt; const mod = pt.zcu; const llvm_usize = try o.lowerType(Type.usize); @@ -6378,7 +6350,7 @@ pub const FuncGen = struct { } fn airPtrSliceFieldPtr(self: *FuncGen, inst: Air.Inst.Index, index: c_uint) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const mod = o.pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const slice_ptr = try self.resolveInst(ty_op.operand); @@ -6389,7 +6361,7 @@ pub const FuncGen = struct { } fn airSliceElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const inst = body_tail[0]; @@ -6413,7 +6385,7 @@ pub const FuncGen = struct { } fn airSliceElemPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const mod = o.pt.zcu; const ty_pl = 
self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; @@ -6427,7 +6399,7 @@ pub const FuncGen = struct { } fn airArrayElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const inst = body_tail[0]; @@ -6460,7 +6432,7 @@ pub const FuncGen = struct { } fn airPtrElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const inst = body_tail[0]; @@ -6486,7 +6458,7 @@ pub const FuncGen = struct { } fn airPtrElemPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; @@ -6529,7 +6501,7 @@ pub const FuncGen = struct { } fn airStructFieldVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const inst = body_tail[0]; @@ -6635,7 +6607,7 @@ pub const FuncGen = struct { } fn airFieldParentPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; @@ -6697,7 +6669,7 @@ pub const FuncGen = struct { } fn airDbgVarPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const mod = o.pt.zcu; const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const operand = try self.resolveInst(pl_op.operand); @@ -6729,7 +6701,7 @@ pub const FuncGen = struct { } fn airDbgVarVal(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const operand = try self.resolveInst(pl_op.operand); const operand_ty = self.typeOf(pl_op.operand); @@ -6746,7 +6718,7 @@ pub const FuncGen = struct { ); const pt = o.pt; - const owner_mod = self.dg.ownerModule(); + const owner_mod = self.ng.ownerModule(); if (isByRef(operand_ty, pt)) { _ = try self.wip.callIntrinsic( .normal, @@ -6800,7 +6772,7 @@ pub const FuncGen = struct { // We don't have such an assembler implemented yet though. For now, // this implementation feeds the inline assembly code directly to LLVM. 
- const o = self.dg.object; + const o = self.ng.object; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.Asm, ty_pl.payload); const is_volatile = @as(u1, @truncate(extra.data.flags >> 31)) != 0; @@ -7181,7 +7153,7 @@ pub const FuncGen = struct { operand_is_ptr: bool, cond: Builder.IntegerCondition, ) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; @@ -7226,7 +7198,7 @@ pub const FuncGen = struct { cond: Builder.IntegerCondition, operand_is_ptr: bool, ) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; @@ -7266,7 +7238,7 @@ pub const FuncGen = struct { } fn airOptionalPayloadPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; @@ -7288,7 +7260,7 @@ pub const FuncGen = struct { fn airOptionalPayloadPtrSet(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { comptime assert(optional_layout_version == 3); - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; @@ -7320,7 +7292,7 @@ pub const FuncGen = struct { } fn airOptionalPayload(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const inst = body_tail[0]; @@ -7345,7 +7317,7 @@ pub const FuncGen = struct { body_tail: []const Air.Inst.Index, operand_is_ptr: bool, ) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const inst = body_tail[0]; @@ -7381,7 +7353,7 @@ pub const FuncGen = struct { inst: Air.Inst.Index, operand_is_ptr: bool, ) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; @@ -7415,7 +7387,7 @@ pub const FuncGen = struct { } fn airErrUnionPayloadPtrSet(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; @@ -7456,7 +7428,7 @@ pub const FuncGen = struct { } fn airSaveErrReturnTraceIndex(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; @@ -7502,7 +7474,7 @@ pub const FuncGen = struct { } fn airWrapOptional(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const inst = body_tail[0]; @@ -7536,7 +7508,7 @@ pub const FuncGen = struct { } fn airWrapErrUnionPayload(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const inst = body_tail[0]; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; @@ -7577,7 +7549,7 @@ pub const FuncGen = struct { } fn airWrapErrUnionErr(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value { - 
const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const inst = body_tail[0]; @@ -7618,7 +7590,7 @@ pub const FuncGen = struct { } fn airWasmMemorySize(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const index = pl_op.payload; const llvm_usize = try o.lowerType(Type.usize); @@ -7628,7 +7600,7 @@ pub const FuncGen = struct { } fn airWasmMemoryGrow(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const index = pl_op.payload; const llvm_isize = try o.lowerType(Type.isize); @@ -7638,7 +7610,7 @@ pub const FuncGen = struct { } fn airVectorStoreElem(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const data = self.air.instructions.items(.data)[@intFromEnum(inst)].vector_store_elem; @@ -7661,7 +7633,7 @@ pub const FuncGen = struct { } fn airMin(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); @@ -7681,7 +7653,7 @@ pub const FuncGen = struct { } fn airMax(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); @@ -7701,7 +7673,7 @@ pub const FuncGen = struct { } fn airSlice(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const ptr = try self.resolveInst(bin_op.lhs); @@ -7711,7 +7683,7 @@ pub const FuncGen = struct { } fn airAdd(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); @@ -7729,7 +7701,7 @@ pub const FuncGen = struct { signed_intrinsic: Builder.Intrinsic, unsigned_intrinsic: Builder.Intrinsic, ) !Builder.Value { - const o = fg.dg.object; + const o = fg.ng.object; const mod = o.pt.zcu; const bin_op = fg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; @@ -7777,7 +7749,7 @@ pub const FuncGen = struct { } fn airAddSat(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); @@ -7797,7 +7769,7 @@ pub const FuncGen = struct { } fn airSub(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); @@ -7818,7 +7790,7 @@ pub const FuncGen = struct { } fn airSubSat(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = 
self.dg.object; + const o = self.ng.object; const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); @@ -7838,7 +7810,7 @@ pub const FuncGen = struct { } fn airMul(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); @@ -7859,7 +7831,7 @@ pub const FuncGen = struct { } fn airMulSat(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); @@ -7888,7 +7860,7 @@ pub const FuncGen = struct { } fn airDivTrunc(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); @@ -7904,7 +7876,7 @@ pub const FuncGen = struct { } fn airDivFloor(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); @@ -7936,7 +7908,7 @@ pub const FuncGen = struct { } fn airDivExact(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); @@ -7954,7 +7926,7 @@ pub const FuncGen = struct { } fn airRem(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); @@ -7971,7 +7943,7 @@ pub const FuncGen = struct { } fn airMod(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); @@ -8007,7 +7979,7 @@ pub const FuncGen = struct { } fn airPtrAdd(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const mod = o.pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; @@ -8029,7 +8001,7 @@ pub const FuncGen = struct { } fn airPtrSub(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const mod = o.pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; @@ -8057,7 +8029,7 @@ pub const FuncGen = struct { signed_intrinsic: Builder.Intrinsic, unsigned_intrinsic: Builder.Intrinsic, ) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const ty_pl = 
self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; @@ -8111,7 +8083,7 @@ pub const FuncGen = struct { result_vector: Builder.Value, vector_len: usize, ) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; assert(args_vectors.len <= 3); var i: usize = 0; @@ -8143,7 +8115,7 @@ pub const FuncGen = struct { param_types: []const Builder.Type, return_type: Builder.Type, ) Allocator.Error!Builder.Function.Index { - const o = self.dg.object; + const o = self.ng.object; if (o.builder.getGlobal(fn_name)) |global| return switch (global.ptrConst(&o.builder).kind) { .alias => |alias| alias.getAliasee(&o.builder).ptrConst(&o.builder).kind.function, .function => |function| function, @@ -8165,7 +8137,7 @@ pub const FuncGen = struct { ty: Type, params: [2]Builder.Value, ) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const mod = o.pt.zcu; const target = mod.getTarget(); const scalar_ty = ty.scalarType(mod); @@ -8271,7 +8243,7 @@ pub const FuncGen = struct { comptime params_len: usize, params: [params_len]Builder.Value, ) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const mod = o.pt.zcu; const target = mod.getTarget(); const scalar_ty = ty.scalarType(mod); @@ -8412,7 +8384,7 @@ pub const FuncGen = struct { } fn airShlWithOverflow(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; @@ -8483,7 +8455,7 @@ pub const FuncGen = struct { } fn airShlExact(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; @@ -8501,7 +8473,7 @@ pub const FuncGen = struct { } fn airShl(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); @@ -8514,7 +8486,7 @@ pub const FuncGen = struct { } fn airShlSat(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; @@ -8557,7 +8529,7 @@ pub const FuncGen = struct { } fn airShr(self: *FuncGen, inst: Air.Inst.Index, is_exact: bool) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; @@ -8576,7 +8548,7 @@ pub const FuncGen = struct { } fn airAbs(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const mod = o.pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try self.resolveInst(ty_op.operand); @@ -8598,7 +8570,7 @@ pub const FuncGen = struct { } fn airIntCast(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const mod = o.pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const dest_ty = self.typeOfIndex(inst); @@ -8614,7 +8586,7 @@ pub const FuncGen = struct { } fn airTrunc(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const ty_op = 
self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try self.resolveInst(ty_op.operand);
const dest_llvm_ty = try o.lowerType(self.typeOfIndex(inst));
@@ -8622,7 +8594,7 @@ pub const FuncGen = struct {
}
fn airFptrunc(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const mod = o.pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try self.resolveInst(ty_op.operand);
@@ -8656,7 +8628,7 @@ pub const FuncGen = struct {
}
fn airFpext(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const mod = o.pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try self.resolveInst(ty_op.operand);
@@ -8696,7 +8668,7 @@ pub const FuncGen = struct {
}
fn airIntFromPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const operand = try self.resolveInst(un_op);
const ptr_ty = self.typeOf(un_op);
@@ -8714,7 +8686,7 @@ pub const FuncGen = struct {
}
fn bitCast(self: *FuncGen, operand: Builder.Value, operand_ty: Type, inst_ty: Type) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const pt = o.pt;
const mod = pt.zcu;
const operand_is_ref = isByRef(operand_ty, pt);
@@ -8739,7 +8711,7 @@ pub const FuncGen = struct {
if (operand_ty.zigTypeTag(mod) == .Vector and inst_ty.zigTypeTag(mod) == .Array) {
const elem_ty = operand_ty.childType(mod);
if (!result_is_ref) {
- return self.dg.todo("implement bitcast vector to non-ref array", .{});
+ return self.ng.todo("implement bitcast vector to non-ref array", .{});
}
const alignment = inst_ty.abiAlignment(pt).toLlvm();
const array_ptr = try self.buildAllocaWorkaround(inst_ty, alignment);
@@ -8766,7 +8738,7 @@ pub const FuncGen = struct {
} else if (operand_ty.zigTypeTag(mod) == .Array and inst_ty.zigTypeTag(mod) == .Vector) {
const elem_ty = operand_ty.childType(mod);
const llvm_vector_ty = try o.lowerType(inst_ty);
- if (!operand_is_ref) return self.dg.todo("implement bitcast non-ref array to vector", .{});
+ if (!operand_is_ref) return self.ng.todo("implement bitcast non-ref array to vector", .{});
const bitcast_ok = elem_ty.bitSize(pt) == elem_ty.abiSize(pt) * 8;
if (bitcast_ok) {
@@ -8831,9 +8803,9 @@ pub const FuncGen = struct {
}
fn airArg(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const pt = o.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const arg_val = self.args[self.arg_index];
self.arg_index += 1;
@@ -8846,9 +8818,8 @@ pub const FuncGen = struct {
const name = self.air.instructions.items(.data)[@intFromEnum(inst)].arg.name;
if (name == .none) return arg_val;
- const func_index = self.dg.decl.getOwnedFunctionIndex();
- const func = mod.funcInfo(func_index);
- const lbrace_line = mod.declPtr(func.owner_decl).navSrcLine(mod) + func.lbrace_line + 1;
+ const func = zcu.funcInfo(zcu.navValue(self.ng.nav_index).toIntern());
+ const lbrace_line = zcu.navSrcLine(func.owner_nav) + func.lbrace_line + 1;
const lbrace_col = func.lbrace_column + 1;
const debug_parameter = try o.builder.debugParameter(
@@ -8870,7 +8841,7 @@ pub const FuncGen = struct {
},
};
- const owner_mod = self.dg.ownerModule();
+ const mod = self.ng.ownerModule();
if (isByRef(inst_ty, pt)) {
_ = try self.wip.callIntrinsic(
.normal,
@@
-8884,7 +8855,7 @@ pub const FuncGen = struct { }, "", ); - } else if (owner_mod.optimize_mode == .Debug) { + } else if (mod.optimize_mode == .Debug) { const alignment = inst_ty.abiAlignment(pt).toLlvm(); const alloca = try self.buildAlloca(arg_val.typeOfWip(&self.wip), alignment); _ = try self.wip.store(.normal, arg_val, alloca, alignment); @@ -8920,7 +8891,7 @@ pub const FuncGen = struct { } fn airAlloc(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const ptr_ty = self.typeOfIndex(inst); @@ -8934,7 +8905,7 @@ pub const FuncGen = struct { } fn airRetPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const ptr_ty = self.typeOfIndex(inst); @@ -8954,7 +8925,7 @@ pub const FuncGen = struct { llvm_ty: Builder.Type, alignment: Builder.Alignment, ) Allocator.Error!Builder.Value { - const target = self.dg.object.pt.zcu.getTarget(); + const target = self.ng.object.pt.zcu.getTarget(); return buildAllocaInner(&self.wip, llvm_ty, alignment, target); } @@ -8964,12 +8935,12 @@ pub const FuncGen = struct { ty: Type, alignment: Builder.Alignment, ) Allocator.Error!Builder.Value { - const o = self.dg.object; + const o = self.ng.object; return self.buildAlloca(try o.builder.arrayType(ty.abiSize(o.pt), .i8), alignment); } fn airStore(self: *FuncGen, inst: Air.Inst.Index, safety: bool) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; @@ -8979,7 +8950,7 @@ pub const FuncGen = struct { const val_is_undef = if (try self.air.value(bin_op.rhs, pt)) |val| val.isUndefDeep(mod) else false; if (val_is_undef) { - const owner_mod = self.dg.ownerModule(); + const owner_mod = self.ng.ownerModule(); // Even if safety is disabled, we still emit a memset to undefined since it conveys // extra information to LLVM, and LLVM will optimize it out. Safety makes the difference @@ -9029,7 +9000,7 @@ pub const FuncGen = struct { /// /// The first instruction of `body_tail` is the one whose copy we want to elide. 
fn canElideLoad(fg: *FuncGen, body_tail: []const Air.Inst.Index) bool { - const o = fg.dg.object; + const o = fg.ng.object; const mod = o.pt.zcu; const ip = &mod.intern_pool; for (body_tail[1..]) |body_inst| { @@ -9045,7 +9016,7 @@ pub const FuncGen = struct { } fn airLoad(fg: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value { - const o = fg.dg.object; + const o = fg.ng.object; const pt = o.pt; const mod = pt.zcu; const inst = body_tail[0]; @@ -9077,7 +9048,7 @@ pub const FuncGen = struct { fn airRetAddr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { _ = inst; - const o = self.dg.object; + const o = self.ng.object; const llvm_usize = try o.lowerType(Type.usize); if (!target_util.supportsReturnAddress(o.pt.zcu.getTarget())) { // https://github.com/ziglang/zig/issues/11946 @@ -9089,7 +9060,7 @@ pub const FuncGen = struct { fn airFrameAddress(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { _ = inst; - const o = self.dg.object; + const o = self.ng.object; const result = try self.wip.callIntrinsic(.normal, .none, .frameaddress, &.{.ptr}, &.{.@"0"}, ""); return self.wip.cast(.ptrtoint, result, try o.lowerType(Type.usize), ""); } @@ -9106,7 +9077,7 @@ pub const FuncGen = struct { inst: Air.Inst.Index, kind: Builder.Function.Instruction.CmpXchg.Kind, ) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; @@ -9157,7 +9128,7 @@ pub const FuncGen = struct { } fn airAtomicRmw(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; @@ -9221,7 +9192,7 @@ pub const FuncGen = struct { } fn airAtomicLoad(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const atomic_load = self.air.instructions.items(.data)[@intFromEnum(inst)].atomic_load; @@ -9269,7 +9240,7 @@ pub const FuncGen = struct { inst: Air.Inst.Index, ordering: Builder.AtomicOrdering, ) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; @@ -9294,7 +9265,7 @@ pub const FuncGen = struct { } fn airMemset(self: *FuncGen, inst: Air.Inst.Index, safety: bool) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; @@ -9329,7 +9300,7 @@ pub const FuncGen = struct { } else { _ = try self.wip.callMemSet(dest_ptr, dest_ptr_align, fill_byte, len, access_kind); } - const owner_mod = self.dg.ownerModule(); + const owner_mod = self.ng.ownerModule(); if (safety and owner_mod.valgrind) { try self.valgrindMarkUndef(dest_ptr, len); } @@ -9435,7 +9406,7 @@ pub const FuncGen = struct { dest_ptr_align: Builder.Alignment, access_kind: Builder.MemoryAccessKind, ) !void { - const o = self.dg.object; + const o = self.ng.object; const usize_zero = try o.builder.intValue(try o.lowerType(Type.usize), 0); const cond = try self.cmp(.normal, .neq, Type.usize, len, usize_zero); const memset_block = try self.wip.block(1, "MemsetTrapSkip"); @@ -9448,7 +9419,7 @@ pub const FuncGen = struct { } fn airMemcpy(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = 
self.ng.object; const pt = o.pt; const mod = pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; @@ -9502,7 +9473,7 @@ pub const FuncGen = struct { } fn airSetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; @@ -9524,7 +9495,7 @@ pub const FuncGen = struct { } fn airGetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const un_ty = self.typeOf(ty_op.operand); @@ -9563,7 +9534,7 @@ pub const FuncGen = struct { } fn airClzCtz(self: *FuncGen, inst: Air.Inst.Index, intrinsic: Builder.Intrinsic) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const inst_ty = self.typeOfIndex(inst); const operand_ty = self.typeOf(ty_op.operand); @@ -9581,7 +9552,7 @@ pub const FuncGen = struct { } fn airBitOp(self: *FuncGen, inst: Air.Inst.Index, intrinsic: Builder.Intrinsic) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const inst_ty = self.typeOfIndex(inst); const operand_ty = self.typeOf(ty_op.operand); @@ -9599,7 +9570,7 @@ pub const FuncGen = struct { } fn airByteSwap(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const mod = o.pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand_ty = self.typeOf(ty_op.operand); @@ -9633,7 +9604,7 @@ pub const FuncGen = struct { } fn airErrorSetHasValue(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const mod = o.pt.zcu; const ip = &mod.intern_pool; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; @@ -9665,7 +9636,7 @@ pub const FuncGen = struct { } fn airIsNamedEnumValue(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const operand = try self.resolveInst(un_op); const enum_ty = self.typeOf(un_op); @@ -9683,22 +9654,21 @@ pub const FuncGen = struct { } fn getIsNamedEnumValueFunction(self: *FuncGen, enum_ty: Type) !Builder.Function.Index { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const zcu = pt.zcu; const ip = &zcu.intern_pool; const enum_type = ip.loadEnumType(enum_ty.toIntern()); // TODO: detect when the type changes and re-emit this function. 
- const gop = try o.named_enum_map.getOrPut(o.gpa, enum_type.decl);
+ const gop = try o.named_enum_map.getOrPut(o.gpa, enum_ty.toIntern());
if (gop.found_existing) return gop.value_ptr.*;
- errdefer assert(o.named_enum_map.remove(enum_type.decl));
+ errdefer assert(o.named_enum_map.remove(enum_ty.toIntern()));
- const decl = zcu.declPtr(enum_type.decl);
const target = zcu.root_mod.resolved_target.result;
const function_index = try o.builder.addFunction(
try o.builder.fnType(.i1, &.{try o.lowerType(Type.fromInterned(enum_type.tag_ty))}, .normal),
- try o.builder.strtabStringFmt("__zig_is_named_enum_value_{}", .{decl.fqn.fmt(ip)}),
+ try o.builder.strtabStringFmt("__zig_is_named_enum_value_{}", .{enum_type.name.fmt(ip)}),
toLlvmAddressSpace(.generic, target),
);
@@ -9741,7 +9711,7 @@ pub const FuncGen = struct {
}
fn airTagName(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const operand = try self.resolveInst(un_op);
const enum_ty = self.typeOf(un_op);
@@ -9759,7 +9729,7 @@ pub const FuncGen = struct {
}
fn airErrorName(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const operand = try self.resolveInst(un_op);
const slice_ty = self.typeOfIndex(inst);
@@ -9774,7 +9744,7 @@ pub const FuncGen = struct {
}
fn airSplat(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const scalar = try self.resolveInst(ty_op.operand);
const vector_ty = self.typeOfIndex(inst);
@@ -9792,7 +9762,7 @@ pub const FuncGen = struct {
}
fn airShuffle(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const pt = o.pt;
const mod = pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
@@ -9848,7 +9818,7 @@ pub const FuncGen = struct {
vector_len: usize,
accum_init: Builder.Value,
) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const usize_ty = try o.lowerType(Type.usize);
const llvm_vector_len = try o.builder.intValue(usize_ty, vector_len);
const llvm_result_ty = accum_init.typeOfWip(&self.wip);
@@ -9902,7 +9872,7 @@ pub const FuncGen = struct {
}
fn airReduce(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const mod = o.pt.zcu;
const target = mod.getTarget();
@@ -10012,7 +9982,7 @@ pub const FuncGen = struct {
}
fn airAggregateInit(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const pt = o.pt;
const mod = pt.zcu;
const ip = &mod.intern_pool;
@@ -10133,7 +10103,7 @@ pub const FuncGen = struct {
}
fn airUnionInit(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const pt = o.pt;
const mod = pt.zcu;
const ip = &mod.intern_pool;
@@ -10256,7 +10226,7 @@ pub const FuncGen = struct {
}
fn airPrefetch(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.dg.object;
+ const o = self.ng.object;
const prefetch = self.air.instructions.items(.data)[@intFromEnum(inst)].prefetch;
comptime assert(@intFromEnum(std.builtin.PrefetchOptions.Rw.read) == 0);
@@ -10306,7 +10276,7 @@ pub
const FuncGen = struct { } fn airAddrSpaceCast(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const inst_ty = self.typeOfIndex(inst); const operand = try self.resolveInst(ty_op.operand); @@ -10324,12 +10294,12 @@ pub const FuncGen = struct { 0 => @field(Builder.Intrinsic, basename ++ ".x"), 1 => @field(Builder.Intrinsic, basename ++ ".y"), 2 => @field(Builder.Intrinsic, basename ++ ".z"), - else => return self.dg.object.builder.intValue(.i32, default), + else => return self.ng.object.builder.intValue(.i32, default), }, &.{}, &.{}, ""); } fn airWorkItemId(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const target = o.pt.zcu.getTarget(); assert(target.cpu.arch == .amdgcn); // TODO is to port this function to other GPU architectures @@ -10339,7 +10309,7 @@ pub const FuncGen = struct { } fn airWorkGroupSize(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const target = o.pt.zcu.getTarget(); assert(target.cpu.arch == .amdgcn); // TODO is to port this function to other GPU architectures @@ -10362,7 +10332,7 @@ pub const FuncGen = struct { } fn airWorkGroupId(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const target = o.pt.zcu.getTarget(); assert(target.cpu.arch == .amdgcn); // TODO is to port this function to other GPU architectures @@ -10372,7 +10342,7 @@ pub const FuncGen = struct { } fn getErrorNameTable(self: *FuncGen) Allocator.Error!Builder.Variable.Index { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const table = o.error_name_table; @@ -10401,7 +10371,7 @@ pub const FuncGen = struct { opt_handle: Builder.Value, is_by_ref: bool, ) Allocator.Error!Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const field = b: { if (is_by_ref) { const field_ptr = try self.wip.gepStruct(opt_llvm_ty, opt_handle, 1, ""); @@ -10422,7 +10392,7 @@ pub const FuncGen = struct { opt_ty: Type, can_elide_load: bool, ) !Builder.Value { - const o = fg.dg.object; + const o = fg.ng.object; const pt = o.pt; const mod = pt.zcu; const payload_ty = opt_ty.optionalChild(mod); @@ -10451,7 +10421,7 @@ pub const FuncGen = struct { payload: Builder.Value, non_null_bit: Builder.Value, ) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const optional_llvm_ty = try o.lowerType(optional_ty); const non_null_field = try self.wip.cast(.zext, non_null_bit, .i8, ""); @@ -10483,7 +10453,7 @@ pub const FuncGen = struct { struct_ptr_ty: Type, field_index: u32, ) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const struct_ty = struct_ptr_ty.childType(mod); @@ -10552,7 +10522,7 @@ pub const FuncGen = struct { // "When loading a value of a type like i20 with a size that is not an integral number of bytes, the result is undefined if the value was not originally written using a store of the same type. " // => so load the byte aligned value and trunc the unwanted bits. 
- const o = fg.dg.object; + const o = fg.ng.object; const pt = o.pt; const mod = pt.zcu; const payload_llvm_ty = try o.lowerType(payload_ty); @@ -10599,7 +10569,7 @@ pub const FuncGen = struct { ptr_alignment: Builder.Alignment, access_kind: Builder.MemoryAccessKind, ) !Builder.Value { - const o = fg.dg.object; + const o = fg.ng.object; const pt = o.pt; //const pointee_llvm_ty = try o.lowerType(pointee_type); const result_align = InternPool.Alignment.fromLlvm(ptr_alignment).max(pointee_type.abiAlignment(pt)).toLlvm(); @@ -10620,7 +10590,7 @@ pub const FuncGen = struct { /// alloca and copies the value into it, then returns the alloca instruction. /// For isByRef=false types, it creates a load instruction and returns it. fn load(self: *FuncGen, ptr: Builder.Value, ptr_ty: Type) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const info = ptr_ty.ptrInfo(mod); @@ -10693,7 +10663,7 @@ pub const FuncGen = struct { elem: Builder.Value, ordering: Builder.AtomicOrdering, ) !void { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const info = ptr_ty.ptrInfo(mod); @@ -10784,7 +10754,7 @@ pub const FuncGen = struct { fn valgrindMarkUndef(fg: *FuncGen, ptr: Builder.Value, len: Builder.Value) Allocator.Error!void { const VG_USERREQ__MAKE_MEM_UNDEFINED = 1296236545; - const o = fg.dg.object; + const o = fg.ng.object; const usize_ty = try o.lowerType(Type.usize); const zero = try o.builder.intValue(usize_ty, 0); const req = try o.builder.intValue(usize_ty, VG_USERREQ__MAKE_MEM_UNDEFINED); @@ -10802,7 +10772,7 @@ pub const FuncGen = struct { a4: Builder.Value, a5: Builder.Value, ) Allocator.Error!Builder.Value { - const o = fg.dg.object; + const o = fg.ng.object; const pt = o.pt; const mod = pt.zcu; const target = mod.getTarget(); @@ -10869,13 +10839,13 @@ pub const FuncGen = struct { } fn typeOf(fg: *FuncGen, inst: Air.Inst.Ref) Type { - const o = fg.dg.object; + const o = fg.ng.object; const mod = o.pt.zcu; return fg.air.typeOf(inst, &mod.intern_pool); } fn typeOfIndex(fg: *FuncGen, inst: Air.Inst.Index) Type { - const o = fg.dg.object; + const o = fg.ng.object; const mod = o.pt.zcu; return fg.air.typeOfIndex(inst, &mod.intern_pool); } |
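
The hunks above are dominated by one mechanical rename inside `FuncGen`: every access that previously went through `self.dg` (`self.dg.object`, `self.dg.todo(...)`, `self.dg.ownerModule()`) now goes through `self.ng`. A minimal sketch of the shape those call sites imply, using stand-in types rather than the compiler's real definitions (only the field and method names visible in the diff are taken from the source; everything else is illustrative):

    const std = @import("std");

    // Stand-ins: the real Object and generator types live in src/codegen/llvm.zig
    // and carry far more state.
    const Object = struct { name: []const u8 };

    const NavGen = struct {
        object: *Object,
        nav_index: u32, // assumed: an InternPool.Nav.Index in the real code
    };

    const FuncGen = struct {
        ng: *NavGen, // previously `dg`, a Decl-oriented generator

        fn objectName(self: *FuncGen) []const u8 {
            // Every former `self.dg.object` access becomes `self.ng.object`.
            return self.ng.object.name;
        }
    };

    test "FuncGen reaches the LLVM object through its NavGen" {
        var object: Object = .{ .name = "llvm-module" };
        var ng: NavGen = .{ .object = &object, .nav_index = 0 };
        var fg: FuncGen = .{ .ng = &ng };
        try std.testing.expectEqualStrings("llvm-module", fg.objectName());
    }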
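
`airArg` no longer reaches the owning function through a `Decl`: the diff swaps `self.dg.decl.getOwnedFunctionIndex()` and `mod.declPtr(func.owner_decl)` for a lookup through the Nav being generated, `zcu.funcInfo(zcu.navValue(self.ng.nav_index).toIntern())`, with the source line coming from `zcu.navSrcLine(func.owner_nav)`. A rough sketch of that lookup chain, with hypothetical stand-ins for the `Zcu` pieces involved:

    // Hypothetical stand-ins; these only model the chain
    // nav_index -> nav value -> funcInfo -> owner_nav -> source line.
    const NavIndex = enum(u32) { _ };

    const Func = struct {
        owner_nav: NavIndex,
        lbrace_line: u32,
        lbrace_column: u32,
    };

    const Zcu = struct {
        funcs: []const Func,
        nav_src_lines: []const u32,

        fn navValue(zcu: *const Zcu, nav: NavIndex) u32 {
            _ = zcu;
            // The real navValue returns the Nav's resolved Value; an integer
            // index stands in for the interned function here.
            return @intFromEnum(nav);
        }

        fn funcInfo(zcu: *const Zcu, interned: u32) Func {
            return zcu.funcs[interned];
        }

        fn navSrcLine(zcu: *const Zcu, nav: NavIndex) u32 {
            return zcu.nav_src_lines[@intFromEnum(nav)];
        }
    };

    /// Mirrors the new lbrace_line computation in airArg.
    fn lbraceLine(zcu: *const Zcu, nav_index: NavIndex) u32 {
        const func = zcu.funcInfo(zcu.navValue(nav_index));
        return zcu.navSrcLine(func.owner_nav) + func.lbrace_line + 1;
    }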
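
The `getIsNamedEnumValueFunction` hunk also changes how the per-enum helper is memoized: the cache key is now the enum type's interned index (`enum_ty.toIntern()`) rather than the enum's owner decl, and the generated symbol takes its name from `enum_type.name` instead of the decl's fully qualified name. A minimal sketch of that memoization pattern, assuming stand-in index types in place of `InternPool.Index` and `Builder.Function.Index`:

    const std = @import("std");
    const assert = std.debug.assert;

    const TypeIndex = enum(u32) { _ }; // stand-in for InternPool.Index
    const FunctionIndex = enum(u32) { _ }; // stand-in for Builder.Function.Index

    const Object = struct {
        gpa: std.mem.Allocator,
        named_enum_map: std.AutoHashMapUnmanaged(TypeIndex, FunctionIndex) = .{},

        fn getIsNamedEnumValueFunction(o: *Object, enum_ty: TypeIndex) !FunctionIndex {
            // Keyed by the interned enum type, not by a Decl index.
            const gop = try o.named_enum_map.getOrPut(o.gpa, enum_ty);
            if (gop.found_existing) return gop.value_ptr.*;
            errdefer assert(o.named_enum_map.remove(enum_ty));
            // The real code builds "__zig_is_named_enum_value_<enum name>" here;
            // a dummy index stands in for the created function.
            const function_index: FunctionIndex = @enumFromInt(@intFromEnum(enum_ty));
            gop.value_ptr.* = function_index;
            return function_index;
        }
    };

As in the diff, the `errdefer` drops the stale cache entry if building the helper function fails partway through.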
