From 525f341f33af9b8aad53931fd5511f00a82cb090 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sat, 15 Jun 2024 16:10:53 -0400 Subject: Zcu: introduce `PerThread` and pass to all the functions --- src/codegen/llvm.zig | 1431 ++++++++++++++++++++++++++------------------------ 1 file changed, 751 insertions(+), 680 deletions(-) (limited to 'src/codegen/llvm.zig') diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 6efef20f22..ca574070bf 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -15,8 +15,6 @@ const link = @import("../link.zig"); const Compilation = @import("../Compilation.zig"); const build_options = @import("build_options"); const Zcu = @import("../Zcu.zig"); -/// Deprecated. -const Module = Zcu; const InternPool = @import("../InternPool.zig"); const Package = @import("../Package.zig"); const Air = @import("../Air.zig"); @@ -810,7 +808,7 @@ pub const Object = struct { gpa: Allocator, builder: Builder, - module: *Module, + pt: Zcu.PerThread, debug_compile_unit: Builder.Metadata, @@ -820,7 +818,7 @@ pub const Object = struct { debug_enums: std.ArrayListUnmanaged(Builder.Metadata), debug_globals: std.ArrayListUnmanaged(Builder.Metadata), - debug_file_map: std.AutoHashMapUnmanaged(*const Module.File, Builder.Metadata), + debug_file_map: std.AutoHashMapUnmanaged(*const Zcu.File, Builder.Metadata), debug_type_map: std.AutoHashMapUnmanaged(Type, Builder.Metadata), debug_unresolved_namespace_scopes: std.AutoArrayHashMapUnmanaged(InternPool.NamespaceIndex, Builder.Metadata), @@ -992,7 +990,10 @@ pub const Object = struct { obj.* = .{ .gpa = gpa, .builder = builder, - .module = comp.module.?, + .pt = .{ + .zcu = comp.module.?, + .tid = .main, + }, .debug_compile_unit = debug_compile_unit, .debug_enums_fwd_ref = debug_enums_fwd_ref, .debug_globals_fwd_ref = debug_globals_fwd_ref, @@ -1033,7 +1034,8 @@ pub const Object = struct { // If o.error_name_table is null, then it was not referenced by any instructions. if (o.error_name_table == .none) return; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const error_name_list = mod.global_error_set.keys(); const llvm_errors = try mod.gpa.alloc(Builder.Constant, error_name_list.len); @@ -1072,7 +1074,7 @@ pub const Object = struct { table_variable_index.setMutability(.constant, &o.builder); table_variable_index.setUnnamedAddr(.unnamed_addr, &o.builder); table_variable_index.setAlignment( - slice_ty.abiAlignment(mod).toLlvm(), + slice_ty.abiAlignment(pt).toLlvm(), &o.builder, ); @@ -1083,8 +1085,7 @@ pub const Object = struct { // If there is no such function in the module, it means the source code does not need it. 
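For orientation before the hunks below: `Zcu.PerThread` pairs the compilation-unit pointer with the id of the thread operating on it, and `Object` now stores that pair in place of the old `module: *Module` field (whose deprecated `Module` alias is deleted above). A minimal sketch of that shape, plus the `assert(std.meta.eql(pt, o.pt))` invariant the updated entry points enforce; the field names `zcu`/`tid` and the `.main` tag come from this diff, while the `Id` enum and everything else here are illustrative stand-ins:

const std = @import("std");

const Zcu = struct {}; // stand-in for the real src/Zcu.zig

const PerThread = struct {
    zcu: *Zcu,
    tid: Id,

    pub const Id = enum { main }; // assumed shape; the diff only shows `.main`
};

const Object = struct {
    pt: PerThread,

    // Mirrors updateFunc/updateDecl/updateExports below: callers pass their
    // per-thread handle, which must match the one captured at init.
    fn update(o: *Object, pt: PerThread) void {
        std.debug.assert(std.meta.eql(pt, o.pt));
        _ = pt.zcu; // all Zcu access now flows through the handle
    }
};

test "per-thread handle" {
    var zcu: Zcu = .{};
    const pt: PerThread = .{ .zcu = &zcu, .tid = .main };
    var o: Object = .{ .pt = pt };
    o.update(pt);
}

Single-threaded callers simply construct the handle with `.tid = .main`, exactly as `Object.create` does above.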
const name = o.builder.strtabStringIfExists(lt_errors_fn_name) orelse return; const llvm_fn = o.builder.getGlobal(name) orelse return; - const mod = o.module; - const errors_len = mod.global_error_set.count(); + const errors_len = o.pt.zcu.global_error_set.count(); var wip = try Builder.WipFunction.init(&o.builder, .{ .function = llvm_fn.ptrConst(&o.builder).kind.function, @@ -1106,10 +1107,8 @@ pub const Object = struct { } fn genModuleLevelAssembly(object: *Object) !void { - const mod = object.module; - const writer = object.builder.setModuleAsm(); - for (mod.global_assembly.values()) |assembly| { + for (object.pt.zcu.global_assembly.values()) |assembly| { try writer.print("{s}\n", .{assembly}); } try object.builder.finishModuleAsm(); @@ -1131,6 +1130,9 @@ pub const Object = struct { }; pub fn emit(self: *Object, options: EmitOptions) !void { + const zcu = self.pt.zcu; + const comp = zcu.comp; + { try self.genErrorNameTable(); try self.genCmpLtErrorsLenFunction(); @@ -1143,8 +1145,8 @@ pub const Object = struct { const namespace_index = self.debug_unresolved_namespace_scopes.keys()[i]; const fwd_ref = self.debug_unresolved_namespace_scopes.values()[i]; - const namespace = self.module.namespacePtr(namespace_index); - const debug_type = try self.lowerDebugType(namespace.getType(self.module)); + const namespace = zcu.namespacePtr(namespace_index); + const debug_type = try self.lowerDebugType(namespace.getType(zcu)); self.builder.debugForwardReferenceSetType(fwd_ref, debug_type); } @@ -1206,12 +1208,12 @@ pub const Object = struct { try file.writeAll(ptr[0..(bitcode.len * 4)]); } - if (!build_options.have_llvm or !self.module.comp.config.use_lib_llvm) { + if (!build_options.have_llvm or !comp.config.use_lib_llvm) { log.err("emitting without libllvm not implemented", .{}); return error.FailedToEmit; } - initializeLLVMTarget(self.module.comp.root_mod.resolved_target.result.cpu.arch); + initializeLLVMTarget(comp.root_mod.resolved_target.result.cpu.arch); const context: *llvm.Context = llvm.Context.create(); errdefer context.dispose(); @@ -1247,8 +1249,8 @@ pub const Object = struct { @panic("Invalid LLVM triple"); } - const optimize_mode = self.module.comp.root_mod.optimize_mode; - const pic = self.module.comp.root_mod.pic; + const optimize_mode = comp.root_mod.optimize_mode; + const pic = comp.root_mod.pic; const opt_level: llvm.CodeGenOptLevel = if (optimize_mode == .Debug) .None @@ -1257,12 +1259,12 @@ pub const Object = struct { const reloc_mode: llvm.RelocMode = if (pic) .PIC - else if (self.module.comp.config.link_mode == .dynamic) + else if (comp.config.link_mode == .dynamic) llvm.RelocMode.DynamicNoPIC else .Static; - const code_model: llvm.CodeModel = switch (self.module.comp.root_mod.code_model) { + const code_model: llvm.CodeModel = switch (comp.root_mod.code_model) { .default => .Default, .tiny => .Tiny, .small => .Small, @@ -1277,24 +1279,24 @@ pub const Object = struct { var target_machine = llvm.TargetMachine.create( target, target_triple_sentinel, - if (self.module.comp.root_mod.resolved_target.result.cpu.model.llvm_name) |s| s.ptr else null, - self.module.comp.root_mod.resolved_target.llvm_cpu_features.?, + if (comp.root_mod.resolved_target.result.cpu.model.llvm_name) |s| s.ptr else null, + comp.root_mod.resolved_target.llvm_cpu_features.?, opt_level, reloc_mode, code_model, - self.module.comp.function_sections, - self.module.comp.data_sections, + comp.function_sections, + comp.data_sections, float_abi, - if 
(target_util.llvmMachineAbi(self.module.comp.root_mod.resolved_target.result)) |s| s.ptr else null, + if (target_util.llvmMachineAbi(comp.root_mod.resolved_target.result)) |s| s.ptr else null, ); errdefer target_machine.dispose(); if (pic) module.setModulePICLevel(); - if (self.module.comp.config.pie) module.setModulePIELevel(); + if (comp.config.pie) module.setModulePIELevel(); if (code_model != .Default) module.setModuleCodeModel(code_model); - if (self.module.comp.llvm_opt_bisect_limit >= 0) { - context.setOptBisectLimit(self.module.comp.llvm_opt_bisect_limit); + if (comp.llvm_opt_bisect_limit >= 0) { + context.setOptBisectLimit(comp.llvm_opt_bisect_limit); } // Unfortunately, LLVM shits the bed when we ask for both binary and assembly. @@ -1352,11 +1354,13 @@ pub const Object = struct { pub fn updateFunc( o: *Object, - zcu: *Module, + pt: Zcu.PerThread, func_index: InternPool.Index, air: Air, liveness: Liveness, ) !void { + assert(std.meta.eql(pt, o.pt)); + const zcu = pt.zcu; const comp = zcu.comp; const func = zcu.funcInfo(func_index); const decl_index = func.owner_decl; @@ -1437,7 +1441,7 @@ pub const Object = struct { var llvm_arg_i: u32 = 0; // This gets the LLVM values from the function and stores them in `dg.args`. - const sret = firstParamSRet(fn_info, zcu, target); + const sret = firstParamSRet(fn_info, pt, target); const ret_ptr: Builder.Value = if (sret) param: { const param = wip.arg(llvm_arg_i); llvm_arg_i += 1; @@ -1478,8 +1482,8 @@ pub const Object = struct { const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[param_index]); const param = wip.arg(llvm_arg_i); - if (isByRef(param_ty, zcu)) { - const alignment = param_ty.abiAlignment(zcu).toLlvm(); + if (isByRef(param_ty, pt)) { + const alignment = param_ty.abiAlignment(pt).toLlvm(); const param_llvm_ty = param.typeOfWip(&wip); const arg_ptr = try buildAllocaInner(&wip, param_llvm_ty, alignment, target); _ = try wip.store(.normal, param, arg_ptr, alignment); @@ -1495,12 +1499,12 @@ pub const Object = struct { const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[it.zig_index - 1]); const param_llvm_ty = try o.lowerType(param_ty); const param = wip.arg(llvm_arg_i); - const alignment = param_ty.abiAlignment(zcu).toLlvm(); + const alignment = param_ty.abiAlignment(pt).toLlvm(); try o.addByRefParamAttrs(&attributes, llvm_arg_i, alignment, it.byval_attr, param_llvm_ty); llvm_arg_i += 1; - if (isByRef(param_ty, zcu)) { + if (isByRef(param_ty, pt)) { args.appendAssumeCapacity(param); } else { args.appendAssumeCapacity(try wip.load(.normal, param_llvm_ty, param, alignment, "")); @@ -1510,12 +1514,12 @@ pub const Object = struct { const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[it.zig_index - 1]); const param_llvm_ty = try o.lowerType(param_ty); const param = wip.arg(llvm_arg_i); - const alignment = param_ty.abiAlignment(zcu).toLlvm(); + const alignment = param_ty.abiAlignment(pt).toLlvm(); try attributes.addParamAttr(llvm_arg_i, .noundef, &o.builder); llvm_arg_i += 1; - if (isByRef(param_ty, zcu)) { + if (isByRef(param_ty, pt)) { args.appendAssumeCapacity(param); } else { args.appendAssumeCapacity(try wip.load(.normal, param_llvm_ty, param, alignment, "")); @@ -1528,11 +1532,11 @@ pub const Object = struct { llvm_arg_i += 1; const param_llvm_ty = try o.lowerType(param_ty); - const alignment = param_ty.abiAlignment(zcu).toLlvm(); + const alignment = param_ty.abiAlignment(pt).toLlvm(); const arg_ptr = try buildAllocaInner(&wip, param_llvm_ty, alignment, target); _ = try wip.store(.normal, param, 
arg_ptr, alignment); - args.appendAssumeCapacity(if (isByRef(param_ty, zcu)) + args.appendAssumeCapacity(if (isByRef(param_ty, pt)) arg_ptr else try wip.load(.normal, param_llvm_ty, arg_ptr, alignment, "")); @@ -1556,7 +1560,7 @@ pub const Object = struct { const elem_align = (if (ptr_info.flags.alignment != .none) @as(InternPool.Alignment, ptr_info.flags.alignment) else - Type.fromInterned(ptr_info.child).abiAlignment(zcu).max(.@"1")).toLlvm(); + Type.fromInterned(ptr_info.child).abiAlignment(pt).max(.@"1")).toLlvm(); try attributes.addParamAttr(llvm_arg_i, .{ .@"align" = elem_align }, &o.builder); const ptr_param = wip.arg(llvm_arg_i); llvm_arg_i += 1; @@ -1573,7 +1577,7 @@ pub const Object = struct { const field_types = it.types_buffer[0..it.types_len]; const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[it.zig_index - 1]); const param_llvm_ty = try o.lowerType(param_ty); - const param_alignment = param_ty.abiAlignment(zcu).toLlvm(); + const param_alignment = param_ty.abiAlignment(pt).toLlvm(); const arg_ptr = try buildAllocaInner(&wip, param_llvm_ty, param_alignment, target); const llvm_ty = try o.builder.structType(.normal, field_types); for (0..field_types.len) |field_i| { @@ -1585,7 +1589,7 @@ pub const Object = struct { _ = try wip.store(.normal, param, field_ptr, alignment); } - const is_by_ref = isByRef(param_ty, zcu); + const is_by_ref = isByRef(param_ty, pt); args.appendAssumeCapacity(if (is_by_ref) arg_ptr else @@ -1603,11 +1607,11 @@ pub const Object = struct { const param = wip.arg(llvm_arg_i); llvm_arg_i += 1; - const alignment = param_ty.abiAlignment(zcu).toLlvm(); + const alignment = param_ty.abiAlignment(pt).toLlvm(); const arg_ptr = try buildAllocaInner(&wip, param_llvm_ty, alignment, target); _ = try wip.store(.normal, param, arg_ptr, alignment); - args.appendAssumeCapacity(if (isByRef(param_ty, zcu)) + args.appendAssumeCapacity(if (isByRef(param_ty, pt)) arg_ptr else try wip.load(.normal, param_llvm_ty, arg_ptr, alignment, "")); @@ -1618,11 +1622,11 @@ pub const Object = struct { const param = wip.arg(llvm_arg_i); llvm_arg_i += 1; - const alignment = param_ty.abiAlignment(zcu).toLlvm(); + const alignment = param_ty.abiAlignment(pt).toLlvm(); const arg_ptr = try buildAllocaInner(&wip, param_llvm_ty, alignment, target); _ = try wip.store(.normal, param, arg_ptr, alignment); - args.appendAssumeCapacity(if (isByRef(param_ty, zcu)) + args.appendAssumeCapacity(if (isByRef(param_ty, pt)) arg_ptr else try wip.load(.normal, param_llvm_ty, arg_ptr, alignment, "")); @@ -1700,8 +1704,9 @@ pub const Object = struct { try fg.wip.finish(); } - pub fn updateDecl(self: *Object, module: *Module, decl_index: InternPool.DeclIndex) !void { - const decl = module.declPtr(decl_index); + pub fn updateDecl(self: *Object, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !void { + assert(std.meta.eql(pt, self.pt)); + const decl = pt.zcu.declPtr(decl_index); var dg: DeclGen = .{ .object = self, .decl = decl, @@ -1711,7 +1716,7 @@ pub const Object = struct { dg.genDecl() catch |err| switch (err) { error.CodegenFail => { decl.analysis = .codegen_failure; - try module.failed_analysis.put(module.gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index }), dg.err_msg.?); + try pt.zcu.failed_analysis.put(pt.zcu.gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index }), dg.err_msg.?); dg.err_msg = null; return; }, @@ -1721,10 +1726,12 @@ pub const Object = struct { pub fn updateExports( self: *Object, - zcu: *Zcu, - exported: Module.Exported, + pt: Zcu.PerThread, + exported: Zcu.Exported, 
export_indices: []const u32, ) link.File.UpdateExportsError!void { + assert(std.meta.eql(pt, self.pt)); + const zcu = pt.zcu; const decl_index = switch (exported) { .decl_index => |i| i, .value => |val| return updateExportedValue(self, zcu, val, export_indices), @@ -1748,7 +1755,7 @@ pub const Object = struct { fn updateExportedValue( o: *Object, - mod: *Module, + mod: *Zcu, exported_value: InternPool.Index, export_indices: []const u32, ) link.File.UpdateExportsError!void { @@ -1783,7 +1790,7 @@ pub const Object = struct { fn updateExportedGlobal( o: *Object, - mod: *Module, + mod: *Zcu, global_index: Builder.Global.Index, export_indices: []const u32, ) link.File.UpdateExportsError!void { @@ -1879,7 +1886,7 @@ pub const Object = struct { global.delete(&self.builder); } - fn getDebugFile(o: *Object, file: *const Module.File) Allocator.Error!Builder.Metadata { + fn getDebugFile(o: *Object, file: *const Zcu.File) Allocator.Error!Builder.Metadata { const gpa = o.gpa; const gop = try o.debug_file_map.getOrPut(gpa, file); errdefer assert(o.debug_file_map.remove(file)); @@ -1909,7 +1916,8 @@ pub const Object = struct { const gpa = o.gpa; const target = o.target; - const zcu = o.module; + const pt = o.pt; + const zcu = pt.zcu; const ip = &zcu.intern_pool; if (o.debug_type_map.get(ty)) |debug_type| return debug_type; @@ -1931,7 +1939,7 @@ pub const Object = struct { const name = try o.allocTypeName(ty); defer gpa.free(name); const builder_name = try o.builder.metadataString(name); - const debug_bits = ty.abiSize(zcu) * 8; // lldb cannot handle non-byte sized types + const debug_bits = ty.abiSize(pt) * 8; // lldb cannot handle non-byte sized types const debug_int_type = switch (info.signedness) { .signed => try o.builder.debugSignedType(builder_name, debug_bits), .unsigned => try o.builder.debugUnsignedType(builder_name, debug_bits), @@ -1941,9 +1949,9 @@ pub const Object = struct { }, .Enum => { const owner_decl_index = ty.getOwnerDecl(zcu); - const owner_decl = o.module.declPtr(owner_decl_index); + const owner_decl = zcu.declPtr(owner_decl_index); - if (!ty.hasRuntimeBitsIgnoreComptime(zcu)) { + if (!ty.hasRuntimeBitsIgnoreComptime(pt)) { const debug_enum_type = try o.makeEmptyNamespaceDebugType(owner_decl_index); try o.debug_type_map.put(gpa, ty, debug_enum_type); return debug_enum_type; @@ -1961,7 +1969,7 @@ pub const Object = struct { for (enum_type.names.get(ip), 0..) 
|field_name_ip, i| { var bigint_space: Value.BigIntSpace = undefined; const bigint = if (enum_type.values.len != 0) - Value.fromInterned(enum_type.values.get(ip)[i]).toBigInt(&bigint_space, zcu) + Value.fromInterned(enum_type.values.get(ip)[i]).toBigInt(&bigint_space, pt) else std.math.big.int.Mutable.init(&bigint_space.limbs, i).toConst(); @@ -1986,8 +1994,8 @@ pub const Object = struct { scope, owner_decl.typeSrcLine(zcu) + 1, // Line try o.lowerDebugType(int_ty), - ty.abiSize(zcu) * 8, - (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8, + ty.abiSize(pt) * 8, + (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8, try o.builder.debugTuple(enumerators), ); @@ -2027,10 +2035,10 @@ pub const Object = struct { ptr_info.flags.is_const or ptr_info.flags.is_volatile or ptr_info.flags.size == .Many or ptr_info.flags.size == .C or - !Type.fromInterned(ptr_info.child).hasRuntimeBitsIgnoreComptime(zcu)) + !Type.fromInterned(ptr_info.child).hasRuntimeBitsIgnoreComptime(pt)) { - const bland_ptr_ty = try zcu.ptrType(.{ - .child = if (!Type.fromInterned(ptr_info.child).hasRuntimeBitsIgnoreComptime(zcu)) + const bland_ptr_ty = try pt.ptrType(.{ + .child = if (!Type.fromInterned(ptr_info.child).hasRuntimeBitsIgnoreComptime(pt)) .anyopaque_type else ptr_info.child, @@ -2060,10 +2068,10 @@ pub const Object = struct { defer gpa.free(name); const line = 0; - const ptr_size = ptr_ty.abiSize(zcu); - const ptr_align = ptr_ty.abiAlignment(zcu); - const len_size = len_ty.abiSize(zcu); - const len_align = len_ty.abiAlignment(zcu); + const ptr_size = ptr_ty.abiSize(pt); + const ptr_align = ptr_ty.abiAlignment(pt); + const len_size = len_ty.abiSize(pt); + const len_align = len_ty.abiAlignment(pt); const len_offset = len_align.forward(ptr_size); @@ -2095,8 +2103,8 @@ pub const Object = struct { o.debug_compile_unit, // Scope line, .none, // Underlying type - ty.abiSize(zcu) * 8, - (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8, + ty.abiSize(pt) * 8, + (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8, try o.builder.debugTuple(&.{ debug_ptr_type, debug_len_type, @@ -2124,7 +2132,7 @@ pub const Object = struct { 0, // Line debug_elem_ty, target.ptrBitWidth(), - (ty.ptrAlignment(zcu).toByteUnits() orelse 0) * 8, + (ty.ptrAlignment(pt).toByteUnits() orelse 0) * 8, 0, // Offset ); @@ -2149,7 +2157,7 @@ pub const Object = struct { const name = try o.allocTypeName(ty); defer gpa.free(name); const owner_decl_index = ty.getOwnerDecl(zcu); - const owner_decl = o.module.declPtr(owner_decl_index); + const owner_decl = zcu.declPtr(owner_decl_index); const file_scope = zcu.namespacePtr(owner_decl.src_namespace).fileScope(zcu); const debug_opaque_type = try o.builder.debugStructType( try o.builder.metadataString(name), @@ -2171,8 +2179,8 @@ pub const Object = struct { .none, // Scope 0, // Line try o.lowerDebugType(ty.childType(zcu)), - ty.abiSize(zcu) * 8, - (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8, + ty.abiSize(pt) * 8, + (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8, try o.builder.debugTuple(&.{ try o.builder.debugSubrange( try o.builder.debugConstant(try o.builder.intConst(.i64, 0)), @@ -2214,8 +2222,8 @@ pub const Object = struct { .none, // Scope 0, // Line debug_elem_type, - ty.abiSize(zcu) * 8, - (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8, + ty.abiSize(pt) * 8, + (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8, try o.builder.debugTuple(&.{ try o.builder.debugSubrange( try o.builder.debugConstant(try o.builder.intConst(.i64, 0)), @@ -2231,7 +2239,7 @@ pub const Object = struct { const name = try 
o.allocTypeName(ty); defer gpa.free(name); const child_ty = ty.optionalChild(zcu); - if (!child_ty.hasRuntimeBitsIgnoreComptime(zcu)) { + if (!child_ty.hasRuntimeBitsIgnoreComptime(pt)) { const debug_bool_type = try o.builder.debugBoolType( try o.builder.metadataString(name), 8, @@ -2258,10 +2266,10 @@ pub const Object = struct { } const non_null_ty = Type.u8; - const payload_size = child_ty.abiSize(zcu); - const payload_align = child_ty.abiAlignment(zcu); - const non_null_size = non_null_ty.abiSize(zcu); - const non_null_align = non_null_ty.abiAlignment(zcu); + const payload_size = child_ty.abiSize(pt); + const payload_align = child_ty.abiAlignment(pt); + const non_null_size = non_null_ty.abiSize(pt); + const non_null_align = non_null_ty.abiAlignment(pt); const non_null_offset = non_null_align.forward(payload_size); const debug_data_type = try o.builder.debugMemberType( @@ -2292,8 +2300,8 @@ pub const Object = struct { o.debug_compile_unit, // Scope 0, // Line .none, // Underlying type - ty.abiSize(zcu) * 8, - (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8, + ty.abiSize(pt) * 8, + (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8, try o.builder.debugTuple(&.{ debug_data_type, debug_some_type, @@ -2310,7 +2318,7 @@ pub const Object = struct { }, .ErrorUnion => { const payload_ty = ty.errorUnionPayload(zcu); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { // TODO: Maybe remove? const debug_error_union_type = try o.lowerDebugType(Type.anyerror); try o.debug_type_map.put(gpa, ty, debug_error_union_type); @@ -2320,10 +2328,10 @@ pub const Object = struct { const name = try o.allocTypeName(ty); defer gpa.free(name); - const error_size = Type.anyerror.abiSize(zcu); - const error_align = Type.anyerror.abiAlignment(zcu); - const payload_size = payload_ty.abiSize(zcu); - const payload_align = payload_ty.abiAlignment(zcu); + const error_size = Type.anyerror.abiSize(pt); + const error_align = Type.anyerror.abiAlignment(pt); + const payload_size = payload_ty.abiSize(pt); + const payload_align = payload_ty.abiAlignment(pt); var error_index: u32 = undefined; var payload_index: u32 = undefined; @@ -2371,8 +2379,8 @@ pub const Object = struct { o.debug_compile_unit, // Sope 0, // Line .none, // Underlying type - ty.abiSize(zcu) * 8, - (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8, + ty.abiSize(pt) * 8, + (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8, try o.builder.debugTuple(&fields), ); @@ -2399,8 +2407,8 @@ pub const Object = struct { const info = Type.fromInterned(backing_int_ty).intInfo(zcu); const builder_name = try o.builder.metadataString(name); const debug_int_type = switch (info.signedness) { - .signed => try o.builder.debugSignedType(builder_name, ty.abiSize(zcu) * 8), - .unsigned => try o.builder.debugUnsignedType(builder_name, ty.abiSize(zcu) * 8), + .signed => try o.builder.debugSignedType(builder_name, ty.abiSize(pt) * 8), + .unsigned => try o.builder.debugUnsignedType(builder_name, ty.abiSize(pt) * 8), }; try o.debug_type_map.put(gpa, ty, debug_int_type); return debug_int_type; @@ -2420,10 +2428,10 @@ pub const Object = struct { const debug_fwd_ref = try o.builder.debugForwardReference(); for (tuple.types.get(ip), tuple.values.get(ip), 0..) 
|field_ty, field_val, i| { - if (field_val != .none or !Type.fromInterned(field_ty).hasRuntimeBits(zcu)) continue; + if (field_val != .none or !Type.fromInterned(field_ty).hasRuntimeBits(pt)) continue; - const field_size = Type.fromInterned(field_ty).abiSize(zcu); - const field_align = Type.fromInterned(field_ty).abiAlignment(zcu); + const field_size = Type.fromInterned(field_ty).abiSize(pt); + const field_align = Type.fromInterned(field_ty).abiAlignment(pt); const field_offset = field_align.forward(offset); offset = field_offset + field_size; @@ -2451,8 +2459,8 @@ pub const Object = struct { o.debug_compile_unit, // Scope 0, // Line .none, // Underlying type - ty.abiSize(zcu) * 8, - (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8, + ty.abiSize(pt) * 8, + (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8, try o.builder.debugTuple(fields.items), ); @@ -2479,7 +2487,7 @@ pub const Object = struct { else => {}, } - if (!ty.hasRuntimeBitsIgnoreComptime(zcu)) { + if (!ty.hasRuntimeBitsIgnoreComptime(pt)) { const owner_decl_index = ty.getOwnerDecl(zcu); const debug_struct_type = try o.makeEmptyNamespaceDebugType(owner_decl_index); try o.debug_type_map.put(gpa, ty, debug_struct_type); @@ -2502,14 +2510,14 @@ pub const Object = struct { var it = struct_type.iterateRuntimeOrder(ip); while (it.next()) |field_index| { const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]); - if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; - const field_size = field_ty.abiSize(zcu); - const field_align = zcu.structFieldAlignment( + if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; + const field_size = field_ty.abiSize(pt); + const field_align = pt.structFieldAlignment( struct_type.fieldAlign(ip, field_index), field_ty, struct_type.layout, ); - const field_offset = ty.structFieldOffset(field_index, zcu); + const field_offset = ty.structFieldOffset(field_index, pt); const field_name = struct_type.fieldName(ip, field_index).unwrap() orelse try ip.getOrPutStringFmt(gpa, "{d}", .{field_index}, .no_embedded_nulls); @@ -2532,8 +2540,8 @@ pub const Object = struct { o.debug_compile_unit, // Scope 0, // Line .none, // Underlying type - ty.abiSize(zcu) * 8, - (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8, + ty.abiSize(pt) * 8, + (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8, try o.builder.debugTuple(fields.items), ); @@ -2553,7 +2561,7 @@ pub const Object = struct { const union_type = ip.loadUnionType(ty.toIntern()); if (!union_type.haveFieldTypes(ip) or - !ty.hasRuntimeBitsIgnoreComptime(zcu) or + !ty.hasRuntimeBitsIgnoreComptime(pt) or !union_type.haveLayout(ip)) { const debug_union_type = try o.makeEmptyNamespaceDebugType(owner_decl_index); @@ -2561,7 +2569,7 @@ pub const Object = struct { return debug_union_type; } - const layout = zcu.getUnionLayout(union_type); + const layout = pt.getUnionLayout(union_type); const debug_fwd_ref = try o.builder.debugForwardReference(); @@ -2575,8 +2583,8 @@ pub const Object = struct { o.debug_compile_unit, // Scope 0, // Line .none, // Underlying type - ty.abiSize(zcu) * 8, - (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8, + ty.abiSize(pt) * 8, + (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8, try o.builder.debugTuple( &.{try o.lowerDebugType(Type.fromInterned(union_type.enum_tag_ty))}, ), @@ -2603,12 +2611,12 @@ pub const Object = struct { for (0..tag_type.names.len) |field_index| { const field_ty = union_type.field_types.get(ip)[field_index]; - if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(zcu)) continue; + if 
(!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(pt)) continue; - const field_size = Type.fromInterned(field_ty).abiSize(zcu); + const field_size = Type.fromInterned(field_ty).abiSize(pt); const field_align: InternPool.Alignment = switch (union_type.flagsPtr(ip).layout) { .@"packed" => .none, - .auto, .@"extern" => zcu.unionFieldNormalAlignment(union_type, @intCast(field_index)), + .auto, .@"extern" => pt.unionFieldNormalAlignment(union_type, @intCast(field_index)), }; const field_name = tag_type.names.get(ip)[field_index]; @@ -2637,8 +2645,8 @@ pub const Object = struct { o.debug_compile_unit, // Scope 0, // Line .none, // Underlying type - ty.abiSize(zcu) * 8, - (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8, + ty.abiSize(pt) * 8, + (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8, try o.builder.debugTuple(fields.items), ); @@ -2696,8 +2704,8 @@ pub const Object = struct { o.debug_compile_unit, // Scope 0, // Line .none, // Underlying type - ty.abiSize(zcu) * 8, - (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8, + ty.abiSize(pt) * 8, + (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8, try o.builder.debugTuple(&full_fields), ); @@ -2718,13 +2726,13 @@ pub const Object = struct { try debug_param_types.ensureUnusedCapacity(3 + fn_info.param_types.len); // Return type goes first. - if (Type.fromInterned(fn_info.return_type).hasRuntimeBitsIgnoreComptime(zcu)) { - const sret = firstParamSRet(fn_info, zcu, target); + if (Type.fromInterned(fn_info.return_type).hasRuntimeBitsIgnoreComptime(pt)) { + const sret = firstParamSRet(fn_info, pt, target); const ret_ty = if (sret) Type.void else Type.fromInterned(fn_info.return_type); debug_param_types.appendAssumeCapacity(try o.lowerDebugType(ret_ty)); if (sret) { - const ptr_ty = try zcu.singleMutPtrType(Type.fromInterned(fn_info.return_type)); + const ptr_ty = try pt.singleMutPtrType(Type.fromInterned(fn_info.return_type)); debug_param_types.appendAssumeCapacity(try o.lowerDebugType(ptr_ty)); } } else { @@ -2732,18 +2740,18 @@ pub const Object = struct { } if (Type.fromInterned(fn_info.return_type).isError(zcu) and - o.module.comp.config.any_error_tracing) + zcu.comp.config.any_error_tracing) { - const ptr_ty = try zcu.singleMutPtrType(try o.getStackTraceType()); + const ptr_ty = try pt.singleMutPtrType(try o.getStackTraceType()); debug_param_types.appendAssumeCapacity(try o.lowerDebugType(ptr_ty)); } for (0..fn_info.param_types.len) |i| { const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[i]); - if (!param_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; + if (!param_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; - if (isByRef(param_ty, zcu)) { - const ptr_ty = try zcu.singleMutPtrType(param_ty); + if (isByRef(param_ty, pt)) { + const ptr_ty = try pt.singleMutPtrType(param_ty); debug_param_types.appendAssumeCapacity(try o.lowerDebugType(ptr_ty)); } else { debug_param_types.appendAssumeCapacity(try o.lowerDebugType(param_ty)); @@ -2770,7 +2778,7 @@ pub const Object = struct { } fn namespaceToDebugScope(o: *Object, namespace_index: InternPool.NamespaceIndex) !Builder.Metadata { - const zcu = o.module; + const zcu = o.pt.zcu; const namespace = zcu.namespacePtr(namespace_index); const file_scope = namespace.fileScope(zcu); if (namespace.parent == .none) return try o.getDebugFile(file_scope); @@ -2783,7 +2791,7 @@ pub const Object = struct { } fn makeEmptyNamespaceDebugType(o: *Object, decl_index: InternPool.DeclIndex) !Builder.Metadata { - const zcu = o.module; + const zcu = o.pt.zcu; const decl = zcu.declPtr(decl_index); const 
file_scope = zcu.namespacePtr(decl.src_namespace).fileScope(zcu); return o.builder.debugStructType( @@ -2799,7 +2807,7 @@ pub const Object = struct { } fn getStackTraceType(o: *Object) Allocator.Error!Type { - const zcu = o.module; + const zcu = o.pt.zcu; const std_mod = zcu.std_mod; const std_file_imported = zcu.importPkg(std_mod) catch unreachable; @@ -2807,13 +2815,13 @@ pub const Object = struct { const builtin_str = try zcu.intern_pool.getOrPutString(zcu.gpa, "builtin", .no_embedded_nulls); const std_file_root_decl = zcu.fileRootDecl(std_file_imported.file_index); const std_namespace = zcu.namespacePtr(zcu.declPtr(std_file_root_decl.unwrap().?).src_namespace); - const builtin_decl = std_namespace.decls.getKeyAdapted(builtin_str, Module.DeclAdapter{ .zcu = zcu }).?; + const builtin_decl = std_namespace.decls.getKeyAdapted(builtin_str, Zcu.DeclAdapter{ .zcu = zcu }).?; const stack_trace_str = try zcu.intern_pool.getOrPutString(zcu.gpa, "StackTrace", .no_embedded_nulls); // buffer is only used for int_type, `builtin` is a struct. const builtin_ty = zcu.declPtr(builtin_decl).val.toType(); const builtin_namespace = zcu.namespacePtrUnwrap(builtin_ty.getNamespaceIndex(zcu)).?; - const stack_trace_decl_index = builtin_namespace.decls.getKeyAdapted(stack_trace_str, Module.DeclAdapter{ .zcu = zcu }).?; + const stack_trace_decl_index = builtin_namespace.decls.getKeyAdapted(stack_trace_str, Zcu.DeclAdapter{ .zcu = zcu }).?; const stack_trace_decl = zcu.declPtr(stack_trace_decl_index); // Sema should have ensured that StackTrace was analyzed. @@ -2824,7 +2832,7 @@ pub const Object = struct { fn allocTypeName(o: *Object, ty: Type) Allocator.Error![:0]const u8 { var buffer = std.ArrayList(u8).init(o.gpa); errdefer buffer.deinit(); - try ty.print(buffer.writer(), o.module); + try ty.print(buffer.writer(), o.pt); return buffer.toOwnedSliceSentinel(0); } @@ -2835,7 +2843,8 @@ pub const Object = struct { o: *Object, decl_index: InternPool.DeclIndex, ) Allocator.Error!Builder.Function.Index { - const zcu = o.module; + const pt = o.pt; + const zcu = pt.zcu; const ip = &zcu.intern_pool; const gpa = o.gpa; const decl = zcu.declPtr(decl_index); @@ -2848,7 +2857,7 @@ pub const Object = struct { assert(decl.has_tv); const fn_info = zcu.typeToFunc(zig_fn_type).?; const target = owner_mod.resolved_target.result; - const sret = firstParamSRet(fn_info, zcu, target); + const sret = firstParamSRet(fn_info, pt, target); const is_extern = decl.isExtern(zcu); const function_index = try o.builder.addFunction( @@ -2929,14 +2938,14 @@ pub const Object = struct { .byval => { const param_index = it.zig_index - 1; const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[param_index]); - if (!isByRef(param_ty, zcu)) { + if (!isByRef(param_ty, pt)) { try o.addByValParamAttrs(&attributes, param_ty, param_index, fn_info, it.llvm_index - 1); } }, .byref => { const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[it.zig_index - 1]); const param_llvm_ty = try o.lowerType(param_ty); - const alignment = param_ty.abiAlignment(zcu); + const alignment = param_ty.abiAlignment(pt); try o.addByRefParamAttrs(&attributes, it.llvm_index - 1, alignment.toLlvm(), it.byval_attr, param_llvm_ty); }, .byref_mut => try attributes.addParamAttr(it.llvm_index - 1, .noundef, &o.builder), @@ -2964,7 +2973,7 @@ pub const Object = struct { attributes: *Builder.FunctionAttributes.Wip, owner_mod: *Package.Module, ) Allocator.Error!void { - const comp = o.module.comp; + const comp = o.pt.zcu.comp; if (!owner_mod.red_zone) { try 
attributes.addFnAttr(.noredzone, &o.builder); @@ -3039,7 +3048,7 @@ pub const Object = struct { } errdefer assert(o.anon_decl_map.remove(decl_val)); - const mod = o.module; + const mod = o.pt.zcu; const decl_ty = mod.intern_pool.typeOf(decl_val); const variable_index = try o.builder.addVariable( @@ -3065,7 +3074,7 @@ pub const Object = struct { if (gop.found_existing) return gop.value_ptr.ptr(&o.builder).kind.variable; errdefer assert(o.decl_map.remove(decl_index)); - const zcu = o.module; + const zcu = o.pt.zcu; const decl = zcu.declPtr(decl_index); const is_extern = decl.isExtern(zcu); @@ -3100,11 +3109,12 @@ pub const Object = struct { } fn errorIntType(o: *Object) Allocator.Error!Builder.Type { - return o.builder.intType(o.module.errorSetBits()); + return o.builder.intType(o.pt.zcu.errorSetBits()); } fn lowerType(o: *Object, t: Type) Allocator.Error!Builder.Type { - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const target = mod.getTarget(); const ip = &mod.intern_pool; return switch (t.toIntern()) { @@ -3230,7 +3240,7 @@ pub const Object = struct { ), .opt_type => |child_ty| { // Must stay in sync with `opt_payload` logic in `lowerPtr`. - if (!Type.fromInterned(child_ty).hasRuntimeBitsIgnoreComptime(mod)) return .i8; + if (!Type.fromInterned(child_ty).hasRuntimeBitsIgnoreComptime(pt)) return .i8; const payload_ty = try o.lowerType(Type.fromInterned(child_ty)); if (t.optionalReprIsPayload(mod)) return payload_ty; @@ -3238,8 +3248,8 @@ pub const Object = struct { comptime assert(optional_layout_version == 3); var fields: [3]Builder.Type = .{ payload_ty, .i8, undefined }; var fields_len: usize = 2; - const offset = Type.fromInterned(child_ty).abiSize(mod) + 1; - const abi_size = t.abiSize(mod); + const offset = Type.fromInterned(child_ty).abiSize(pt) + 1; + const abi_size = t.abiSize(pt); const padding_len = abi_size - offset; if (padding_len > 0) { fields[2] = try o.builder.arrayType(padding_len, .i8); @@ -3252,16 +3262,16 @@ pub const Object = struct { // Must stay in sync with `codegen.errUnionPayloadOffset`. // See logic in `lowerPtr`. 
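The `.opt_type` case above pins the lowering of optionals to `{ payload, i8, [padding]i8 }` (`optional_layout_version == 3`): one non-null flag byte directly after the payload, then padding up to the optional's ABI size. A self-contained restatement of the `offset`/`padding_len` byte accounting:

const std = @import("std");

/// Trailing padding for an optional lowered as { payload, i8 flag, padding }:
/// the bytes between the flag byte and the optional's ABI size.
fn optionalPadding(payload_abi_size: u64, opt_abi_size: u64) u64 {
    return opt_abi_size - (payload_abi_size + 1);
}

test "optional padding" {
    // e.g. a 4-byte, 4-byte-aligned payload: 4 + 1 flag byte, rounded up to 8
    try std.testing.expectEqual(@as(u64, 3), optionalPadding(4, 8));
}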
const error_type = try o.errorIntType(); - if (!Type.fromInterned(error_union_type.payload_type).hasRuntimeBitsIgnoreComptime(mod)) + if (!Type.fromInterned(error_union_type.payload_type).hasRuntimeBitsIgnoreComptime(pt)) return error_type; const payload_type = try o.lowerType(Type.fromInterned(error_union_type.payload_type)); - const err_int_ty = try mod.errorIntType(); + const err_int_ty = try o.pt.errorIntType(); - const payload_align = Type.fromInterned(error_union_type.payload_type).abiAlignment(mod); - const error_align = err_int_ty.abiAlignment(mod); + const payload_align = Type.fromInterned(error_union_type.payload_type).abiAlignment(pt); + const error_align = err_int_ty.abiAlignment(pt); - const payload_size = Type.fromInterned(error_union_type.payload_type).abiSize(mod); - const error_size = err_int_ty.abiSize(mod); + const payload_size = Type.fromInterned(error_union_type.payload_type).abiSize(pt); + const error_size = err_int_ty.abiSize(pt); var fields: [3]Builder.Type = undefined; var fields_len: usize = 2; @@ -3317,12 +3327,12 @@ pub const Object = struct { var it = struct_type.iterateRuntimeOrder(ip); while (it.next()) |field_index| { const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]); - const field_align = mod.structFieldAlignment( + const field_align = pt.structFieldAlignment( struct_type.fieldAlign(ip, field_index), field_ty, struct_type.layout, ); - const field_ty_align = field_ty.abiAlignment(mod); + const field_ty_align = field_ty.abiAlignment(pt); if (field_align.compare(.lt, field_ty_align)) struct_kind = .@"packed"; big_align = big_align.max(field_align); const prev_offset = offset; @@ -3334,7 +3344,7 @@ pub const Object = struct { try o.builder.arrayType(padding_len, .i8), ); - if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) { // This is a zero-bit field. If there are runtime bits after this field, // map to the next LLVM field (which we know exists): otherwise, don't // map the field, indicating it's at the end of the struct. @@ -3353,7 +3363,7 @@ pub const Object = struct { }, @intCast(llvm_field_types.items.len)); try llvm_field_types.append(o.gpa, try o.lowerType(field_ty)); - offset += field_ty.abiSize(mod); + offset += field_ty.abiSize(pt); } { const prev_offset = offset; @@ -3386,7 +3396,7 @@ pub const Object = struct { var offset: u64 = 0; var big_align: InternPool.Alignment = .none; - const struct_size = t.abiSize(mod); + const struct_size = t.abiSize(pt); for ( anon_struct_type.types.get(ip), @@ -3395,7 +3405,7 @@ pub const Object = struct { ) |field_ty, field_val, field_index| { if (field_val != .none) continue; - const field_align = Type.fromInterned(field_ty).abiAlignment(mod); + const field_align = Type.fromInterned(field_ty).abiAlignment(pt); big_align = big_align.max(field_align); const prev_offset = offset; offset = field_align.forward(offset); @@ -3405,7 +3415,7 @@ pub const Object = struct { o.gpa, try o.builder.arrayType(padding_len, .i8), ); - if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(mod)) { + if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(pt)) { // This is a zero-bit field. If there are runtime bits after this field, // map to the next LLVM field (which we know exists): otherwise, don't // map the field, indicating it's at the end of the struct. 
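Every aggregate loop in `lowerType` follows the same scheme: keep a running byte offset, round it up to the next field's alignment, and materialize any gap as an explicit `[N x i8]` member, with zero-bit fields contributing alignment only. The rounding is the same one `Alignment.forward` performs; in isolation:

const std = @import("std");

/// Padding bytes needed before a field of the given alignment when the
/// previous field ended at `offset`.
fn paddingBefore(offset: u64, field_align: u64) u64 {
    return std.mem.alignForward(u64, offset, field_align) - offset;
}

test "padding before field" {
    try std.testing.expectEqual(@as(u64, 3), paddingBefore(5, 8)); // 5 -> 8
    try std.testing.expectEqual(@as(u64, 0), paddingBefore(8, 8)); // already aligned
}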
@@ -3423,7 +3433,7 @@ pub const Object = struct { }, @intCast(llvm_field_types.items.len)); try llvm_field_types.append(o.gpa, try o.lowerType(Type.fromInterned(field_ty))); - offset += Type.fromInterned(field_ty).abiSize(mod); + offset += Type.fromInterned(field_ty).abiSize(pt); } { const prev_offset = offset; @@ -3440,10 +3450,10 @@ pub const Object = struct { if (o.type_map.get(t.toIntern())) |value| return value; const union_obj = ip.loadUnionType(t.toIntern()); - const layout = mod.getUnionLayout(union_obj); + const layout = pt.getUnionLayout(union_obj); if (union_obj.flagsPtr(ip).layout == .@"packed") { - const int_ty = try o.builder.intType(@intCast(t.bitSize(mod))); + const int_ty = try o.builder.intType(@intCast(t.bitSize(pt))); try o.type_map.put(o.gpa, t.toIntern(), int_ty); return int_ty; } @@ -3552,18 +3562,20 @@ pub const Object = struct { /// being a zero bit type, but it should still be lowered as an i8 in such case. /// There are other similar cases handled here as well. fn lowerPtrElemTy(o: *Object, elem_ty: Type) Allocator.Error!Builder.Type { - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const lower_elem_ty = switch (elem_ty.zigTypeTag(mod)) { .Opaque => true, .Fn => !mod.typeToFunc(elem_ty).?.is_generic, - .Array => elem_ty.childType(mod).hasRuntimeBitsIgnoreComptime(mod), - else => elem_ty.hasRuntimeBitsIgnoreComptime(mod), + .Array => elem_ty.childType(mod).hasRuntimeBitsIgnoreComptime(pt), + else => elem_ty.hasRuntimeBitsIgnoreComptime(pt), }; return if (lower_elem_ty) try o.lowerType(elem_ty) else .i8; } fn lowerTypeFn(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Builder.Type { - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const target = mod.getTarget(); const ret_ty = try lowerFnRetTy(o, fn_info); @@ -3571,14 +3583,14 @@ pub const Object = struct { var llvm_params = std.ArrayListUnmanaged(Builder.Type){}; defer llvm_params.deinit(o.gpa); - if (firstParamSRet(fn_info, mod, target)) { + if (firstParamSRet(fn_info, pt, target)) { try llvm_params.append(o.gpa, .ptr); } if (Type.fromInterned(fn_info.return_type).isError(mod) and mod.comp.config.any_error_tracing) { - const ptr_ty = try mod.singleMutPtrType(try o.getStackTraceType()); + const ptr_ty = try pt.singleMutPtrType(try o.getStackTraceType()); try llvm_params.append(o.gpa, try o.lowerType(ptr_ty)); } @@ -3595,7 +3607,7 @@ pub const Object = struct { .abi_sized_int => { const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[it.zig_index - 1]); try llvm_params.append(o.gpa, try o.builder.intType( - @intCast(param_ty.abiSize(mod) * 8), + @intCast(param_ty.abiSize(pt) * 8), )); }, .slice => { @@ -3633,7 +3645,8 @@ pub const Object = struct { } fn lowerValueToInt(o: *Object, llvm_int_ty: Builder.Type, arg_val: InternPool.Index) Error!Builder.Constant { - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const target = mod.getTarget(); @@ -3666,15 +3679,15 @@ pub const Object = struct { var running_int = try o.builder.intConst(llvm_int_ty, 0); var running_bits: u16 = 0; for (struct_type.field_types.get(ip), 0..) 
|field_ty, field_index| { - if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(mod)) continue; + if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(pt)) continue; const shift_rhs = try o.builder.intConst(llvm_int_ty, running_bits); - const field_val = try o.lowerValueToInt(llvm_int_ty, (try val.fieldValue(mod, field_index)).toIntern()); + const field_val = try o.lowerValueToInt(llvm_int_ty, (try val.fieldValue(pt, field_index)).toIntern()); const shifted = try o.builder.binConst(.shl, field_val, shift_rhs); running_int = try o.builder.binConst(.xor, running_int, shifted); - const ty_bit_size: u16 = @intCast(Type.fromInterned(field_ty).bitSize(mod)); + const ty_bit_size: u16 = @intCast(Type.fromInterned(field_ty).bitSize(pt)); running_bits += ty_bit_size; } return running_int; @@ -3683,7 +3696,7 @@ pub const Object = struct { else => unreachable, }, .un => |un| { - const layout = ty.unionGetLayout(mod); + const layout = ty.unionGetLayout(pt); if (layout.payload_size == 0) return o.lowerValue(un.tag); const union_obj = mod.typeToUnion(ty).?; @@ -3701,7 +3714,7 @@ pub const Object = struct { } const field_index = mod.unionTagFieldIndex(union_obj, Value.fromInterned(un.tag)).?; const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]); - if (!field_ty.hasRuntimeBits(mod)) return o.builder.intConst(llvm_int_ty, 0); + if (!field_ty.hasRuntimeBits(pt)) return o.builder.intConst(llvm_int_ty, 0); return o.lowerValueToInt(llvm_int_ty, un.val); }, .simple_value => |simple_value| switch (simple_value) { @@ -3715,7 +3728,7 @@ pub const Object = struct { .opt => {}, // pointer like optional expected else => unreachable, } - const bits = ty.bitSize(mod); + const bits = ty.bitSize(pt); const bytes: usize = @intCast(std.mem.alignForward(u64, bits, 8) / 8); var stack = std.heap.stackFallback(32, o.gpa); @@ -3729,12 +3742,7 @@ pub const Object = struct { defer allocator.free(limbs); @memset(limbs, 0); - val.writeToPackedMemory( - ty, - mod, - std.mem.sliceAsBytes(limbs)[0..bytes], - 0, - ) catch unreachable; + val.writeToPackedMemory(ty, pt, std.mem.sliceAsBytes(limbs)[0..bytes], 0) catch unreachable; if (builtin.target.cpu.arch.endian() == .little) { if (target.cpu.arch.endian() == .big) @@ -3752,7 +3760,8 @@ pub const Object = struct { } fn lowerValue(o: *Object, arg_val: InternPool.Index) Error!Builder.Constant { - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const target = mod.getTarget(); @@ -3811,7 +3820,7 @@ pub const Object = struct { }, .int => { var bigint_space: Value.BigIntSpace = undefined; - const bigint = val.toBigInt(&bigint_space, mod); + const bigint = val.toBigInt(&bigint_space, pt); return lowerBigInt(o, ty, bigint); }, .err => |err| { @@ -3821,24 +3830,24 @@ pub const Object = struct { }, .error_union => |error_union| { const err_val = switch (error_union.val) { - .err_name => |err_name| try mod.intern(.{ .err = .{ + .err_name => |err_name| try pt.intern(.{ .err = .{ .ty = ty.errorUnionSet(mod).toIntern(), .name = err_name, } }), - .payload => (try mod.intValue(try mod.errorIntType(), 0)).toIntern(), + .payload => (try pt.intValue(try pt.errorIntType(), 0)).toIntern(), }; - const err_int_ty = try mod.errorIntType(); + const err_int_ty = try pt.errorIntType(); const payload_type = ty.errorUnionPayload(mod); - if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) { + if (!payload_type.hasRuntimeBitsIgnoreComptime(pt)) { // We use the error type directly as the type. 
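`lowerValueToInt` above folds a packed struct into a single integer constant: each field's value is shifted to the running bit offset and XORed into the accumulator, which is safe because field bit ranges never overlap. The same idea on plain integers, assuming the total packed width stays below 64 bits:

const std = @import("std");

fn packBits(values: []const u64, widths: []const u6) u64 {
    var acc: u64 = 0;
    var bit: u6 = 0;
    for (values, widths) |v, w| {
        acc ^= v << bit; // xor behaves like or: field bit ranges are disjoint
        bit +%= w; // assumes the packed width stays below 64 bits
    }
    return acc;
}

test "pack bits" {
    // two 4-bit fields: 0xA at bit 0, 0x5 at bit 4 -> 0x5A
    try std.testing.expectEqual(@as(u64, 0x5A), packBits(&.{ 0xA, 0x5 }, &.{ 4, 4 }));
}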
return o.lowerValue(err_val); } - const payload_align = payload_type.abiAlignment(mod); - const error_align = err_int_ty.abiAlignment(mod); + const payload_align = payload_type.abiAlignment(pt); + const error_align = err_int_ty.abiAlignment(pt); const llvm_error_value = try o.lowerValue(err_val); const llvm_payload_value = try o.lowerValue(switch (error_union.val) { - .err_name => try mod.intern(.{ .undef = payload_type.toIntern() }), + .err_name => try pt.intern(.{ .undef = payload_type.toIntern() }), .payload => |payload| payload, }); @@ -3869,16 +3878,16 @@ pub const Object = struct { .enum_tag => |enum_tag| o.lowerValue(enum_tag.int), .float => switch (ty.floatBits(target)) { 16 => if (backendSupportsF16(target)) - try o.builder.halfConst(val.toFloat(f16, mod)) + try o.builder.halfConst(val.toFloat(f16, pt)) else - try o.builder.intConst(.i16, @as(i16, @bitCast(val.toFloat(f16, mod)))), - 32 => try o.builder.floatConst(val.toFloat(f32, mod)), - 64 => try o.builder.doubleConst(val.toFloat(f64, mod)), + try o.builder.intConst(.i16, @as(i16, @bitCast(val.toFloat(f16, pt)))), + 32 => try o.builder.floatConst(val.toFloat(f32, pt)), + 64 => try o.builder.doubleConst(val.toFloat(f64, pt)), 80 => if (backendSupportsF80(target)) - try o.builder.x86_fp80Const(val.toFloat(f80, mod)) + try o.builder.x86_fp80Const(val.toFloat(f80, pt)) else - try o.builder.intConst(.i80, @as(i80, @bitCast(val.toFloat(f80, mod)))), - 128 => try o.builder.fp128Const(val.toFloat(f128, mod)), + try o.builder.intConst(.i80, @as(i80, @bitCast(val.toFloat(f80, pt)))), + 128 => try o.builder.fp128Const(val.toFloat(f128, pt)), else => unreachable, }, .ptr => try o.lowerPtr(arg_val, 0), @@ -3891,7 +3900,7 @@ pub const Object = struct { const payload_ty = ty.optionalChild(mod); const non_null_bit = try o.builder.intConst(.i8, @intFromBool(opt.val != .none)); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { return non_null_bit; } const llvm_ty = try o.lowerType(ty); @@ -3909,7 +3918,7 @@ pub const Object = struct { var fields: [3]Builder.Type = undefined; var vals: [3]Builder.Constant = undefined; vals[0] = try o.lowerValue(switch (opt.val) { - .none => try mod.intern(.{ .undef = payload_ty.toIntern() }), + .none => try pt.intern(.{ .undef = payload_ty.toIntern() }), else => |payload| payload, }); vals[1] = non_null_bit; @@ -4058,9 +4067,9 @@ pub const Object = struct { 0.., ) |field_ty, field_val, field_index| { if (field_val != .none) continue; - if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(mod)) continue; + if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(pt)) continue; - const field_align = Type.fromInterned(field_ty).abiAlignment(mod); + const field_align = Type.fromInterned(field_ty).abiAlignment(pt); big_align = big_align.max(field_align); const prev_offset = offset; offset = field_align.forward(offset); @@ -4076,13 +4085,13 @@ pub const Object = struct { } vals[llvm_index] = - try o.lowerValue((try val.fieldValue(mod, field_index)).toIntern()); + try o.lowerValue((try val.fieldValue(pt, field_index)).toIntern()); fields[llvm_index] = vals[llvm_index].typeOf(&o.builder); if (fields[llvm_index] != struct_ty.structFields(&o.builder)[llvm_index]) need_unnamed = true; llvm_index += 1; - offset += Type.fromInterned(field_ty).abiSize(mod); + offset += Type.fromInterned(field_ty).abiSize(pt); } { const prev_offset = offset; @@ -4109,7 +4118,7 @@ pub const Object = struct { if (struct_type.layout == .@"packed") { comptime 
assert(Type.packed_struct_layout_version == 2); - const bits = ty.bitSize(mod); + const bits = ty.bitSize(pt); const llvm_int_ty = try o.builder.intType(@intCast(bits)); return o.lowerValueToInt(llvm_int_ty, arg_val); @@ -4138,7 +4147,7 @@ pub const Object = struct { var field_it = struct_type.iterateRuntimeOrder(ip); while (field_it.next()) |field_index| { const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]); - const field_align = mod.structFieldAlignment( + const field_align = pt.structFieldAlignment( struct_type.fieldAlign(ip, field_index), field_ty, struct_type.layout, @@ -4158,20 +4167,20 @@ pub const Object = struct { llvm_index += 1; } - if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) { // This is a zero-bit field - we only needed it for the alignment. continue; } vals[llvm_index] = try o.lowerValue( - (try val.fieldValue(mod, field_index)).toIntern(), + (try val.fieldValue(pt, field_index)).toIntern(), ); fields[llvm_index] = vals[llvm_index].typeOf(&o.builder); if (fields[llvm_index] != struct_ty.structFields(&o.builder)[llvm_index]) need_unnamed = true; llvm_index += 1; - offset += field_ty.abiSize(mod); + offset += field_ty.abiSize(pt); } { const prev_offset = offset; @@ -4195,7 +4204,7 @@ pub const Object = struct { }, .un => |un| { const union_ty = try o.lowerType(ty); - const layout = ty.unionGetLayout(mod); + const layout = ty.unionGetLayout(pt); if (layout.payload_size == 0) return o.lowerValue(un.tag); const union_obj = mod.typeToUnion(ty).?; @@ -4206,8 +4215,8 @@ pub const Object = struct { const field_index = mod.unionTagFieldIndex(union_obj, Value.fromInterned(un.tag)).?; const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]); if (container_layout == .@"packed") { - if (!field_ty.hasRuntimeBits(mod)) return o.builder.intConst(union_ty, 0); - const bits = ty.bitSize(mod); + if (!field_ty.hasRuntimeBits(pt)) return o.builder.intConst(union_ty, 0); + const bits = ty.bitSize(pt); const llvm_int_ty = try o.builder.intType(@intCast(bits)); return o.lowerValueToInt(llvm_int_ty, arg_val); @@ -4219,7 +4228,7 @@ pub const Object = struct { // must pointer cast to the expected type before accessing the union. 
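The `@intFromBool(layout.tag_align.compare(.gte, layout.payload_align))` index just below reads as: in the LLVM struct for a non-packed tagged union, whichever of tag and payload has the greater alignment is emitted first, so the payload sits at index 1 when the tag's alignment wins (ties included) and at index 0 otherwise. A hypothetical restatement of that indexing rule:

const std = @import("std");

/// Index of the payload member in the LLVM struct for a tagged union,
/// per the alignment comparison used below.
fn payloadFieldIndex(tag_align: u64, payload_align: u64) u1 {
    return @intFromBool(tag_align >= payload_align);
}

test "union payload index" {
    try std.testing.expectEqual(@as(u1, 1), payloadFieldIndex(8, 4)); // {tag, payload}
    try std.testing.expectEqual(@as(u1, 0), payloadFieldIndex(1, 8)); // {payload, tag}
}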
need_unnamed = layout.most_aligned_field != field_index; - if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) { const padding_len = layout.payload_size; break :p try o.builder.undefConst(try o.builder.arrayType(padding_len, .i8)); } @@ -4228,7 +4237,7 @@ pub const Object = struct { if (payload_ty != union_ty.structFields(&o.builder)[ @intFromBool(layout.tag_align.compare(.gte, layout.payload_align)) ]) need_unnamed = true; - const field_size = field_ty.abiSize(mod); + const field_size = field_ty.abiSize(pt); if (field_size == layout.payload_size) break :p payload; const padding_len = layout.payload_size - field_size; const padding_ty = try o.builder.arrayType(padding_len, .i8); @@ -4239,7 +4248,7 @@ pub const Object = struct { } else p: { assert(layout.tag_size == 0); if (container_layout == .@"packed") { - const bits = ty.bitSize(mod); + const bits = ty.bitSize(pt); const llvm_int_ty = try o.builder.intType(@intCast(bits)); return o.lowerValueToInt(llvm_int_ty, arg_val); @@ -4286,7 +4295,7 @@ pub const Object = struct { ty: Type, bigint: std.math.big.int.Const, ) Allocator.Error!Builder.Constant { - const mod = o.module; + const mod = o.pt.zcu; return o.builder.bigIntConst(try o.builder.intType(ty.intInfo(mod).bits), bigint); } @@ -4295,7 +4304,8 @@ pub const Object = struct { ptr_val: InternPool.Index, prev_offset: u64, ) Error!Builder.Constant { - const zcu = o.module; + const pt = o.pt; + const zcu = pt.zcu; const ptr = zcu.intern_pool.indexToKey(ptr_val).ptr; const offset: u64 = prev_offset + ptr.byte_offset; return switch (ptr.base_addr) { @@ -4320,7 +4330,7 @@ pub const Object = struct { eu_ptr, offset + @import("../codegen.zig").errUnionPayloadOffset( Value.fromInterned(eu_ptr).typeOf(zcu).childType(zcu), - zcu, + pt, ), ), .opt_payload => |opt_ptr| try o.lowerPtr(opt_ptr, offset), @@ -4336,7 +4346,7 @@ pub const Object = struct { }; }, .Struct, .Union => switch (agg_ty.containerLayout(zcu)) { - .auto => agg_ty.structFieldOffset(@intCast(field.index), zcu), + .auto => agg_ty.structFieldOffset(@intCast(field.index), pt), .@"extern", .@"packed" => unreachable, }, else => unreachable, @@ -4353,7 +4363,8 @@ pub const Object = struct { o: *Object, anon_decl: InternPool.Key.Ptr.BaseAddr.AnonDecl, ) Error!Builder.Constant { - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const decl_val = anon_decl.val; const decl_ty = Type.fromInterned(ip.typeOf(decl_val)); @@ -4370,14 +4381,14 @@ pub const Object = struct { const ptr_ty = Type.fromInterned(anon_decl.orig_ty); const is_fn_body = decl_ty.zigTypeTag(mod) == .Fn; - if ((!is_fn_body and !decl_ty.hasRuntimeBits(mod)) or + if ((!is_fn_body and !decl_ty.hasRuntimeBits(pt)) or (is_fn_body and mod.typeToFunc(decl_ty).?.is_generic)) return o.lowerPtrToVoid(ptr_ty); if (is_fn_body) @panic("TODO"); const llvm_addr_space = toLlvmAddressSpace(ptr_ty.ptrAddressSpace(mod), target); - const alignment = ptr_ty.ptrAlignment(mod); + const alignment = ptr_ty.ptrAlignment(pt); const llvm_global = (try o.resolveGlobalAnonDecl(decl_val, llvm_addr_space, alignment)).ptrConst(&o.builder).global; const llvm_val = try o.builder.convConst( @@ -4389,7 +4400,8 @@ pub const Object = struct { } fn lowerDeclRefValue(o: *Object, decl_index: InternPool.DeclIndex) Allocator.Error!Builder.Constant { - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; // In the case of something like: // fn foo() void {} @@ -4408,10 +4420,10 @@ pub const Object = struct { } const 
decl_ty = decl.typeOf(mod); - const ptr_ty = try decl.declPtrType(mod); + const ptr_ty = try decl.declPtrType(pt); const is_fn_body = decl_ty.zigTypeTag(mod) == .Fn; - if ((!is_fn_body and !decl_ty.hasRuntimeBits(mod)) or + if ((!is_fn_body and !decl_ty.hasRuntimeBits(pt)) or (is_fn_body and mod.typeToFunc(decl_ty).?.is_generic)) { return o.lowerPtrToVoid(ptr_ty); @@ -4431,7 +4443,7 @@ pub const Object = struct { } fn lowerPtrToVoid(o: *Object, ptr_ty: Type) Allocator.Error!Builder.Constant { - const mod = o.module; + const mod = o.pt.zcu; // Even though we are pointing at something which has zero bits (e.g. `void`), // Pointers are defined to have bits. So we must return something here. // The value cannot be undefined, because we use the `nonnull` annotation @@ -4459,20 +4471,21 @@ pub const Object = struct { /// RMW exchange of floating-point values is bitcasted to same-sized integer /// types to work around a LLVM deficiency when targeting ARM/AArch64. fn getAtomicAbiType(o: *Object, ty: Type, is_rmw_xchg: bool) Allocator.Error!Builder.Type { - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const int_ty = switch (ty.zigTypeTag(mod)) { .Int => ty, .Enum => ty.intTagType(mod), .Float => { if (!is_rmw_xchg) return .none; - return o.builder.intType(@intCast(ty.abiSize(mod) * 8)); + return o.builder.intType(@intCast(ty.abiSize(pt) * 8)); }, .Bool => return .i8, else => return .none, }; const bit_count = int_ty.intInfo(mod).bits; if (!std.math.isPowerOfTwo(bit_count) or (bit_count % 8) != 0) { - return o.builder.intType(@intCast(int_ty.abiSize(mod) * 8)); + return o.builder.intType(@intCast(int_ty.abiSize(pt) * 8)); } else { return .none; } @@ -4486,7 +4499,8 @@ pub const Object = struct { fn_info: InternPool.Key.FuncType, llvm_arg_i: u32, ) Allocator.Error!void { - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; if (param_ty.isPtrAtRuntime(mod)) { const ptr_info = param_ty.ptrInfo(mod); if (math.cast(u5, param_index)) |i| { @@ -4507,7 +4521,7 @@ pub const Object = struct { const elem_align = if (ptr_info.flags.alignment != .none) ptr_info.flags.alignment else - Type.fromInterned(ptr_info.child).abiAlignment(mod).max(.@"1"); + Type.fromInterned(ptr_info.child).abiAlignment(pt).max(.@"1"); try attributes.addParamAttr(llvm_arg_i, .{ .@"align" = elem_align.toLlvm() }, &o.builder); } else if (ccAbiPromoteInt(fn_info.cc, mod, param_ty)) |s| switch (s) { .signed => try attributes.addParamAttr(llvm_arg_i, .signext, &o.builder), @@ -4540,7 +4554,7 @@ pub const Object = struct { const name = try o.builder.strtabString(lt_errors_fn_name); if (o.builder.getGlobal(name)) |llvm_fn| return llvm_fn.ptrConst(&o.builder).kind.function; - const zcu = o.module; + const zcu = o.pt.zcu; const target = zcu.root_mod.resolved_target.result; const function_index = try o.builder.addFunction( try o.builder.fnType(.i1, &.{try o.errorIntType()}, .normal), @@ -4559,7 +4573,8 @@ pub const Object = struct { } fn getEnumTagNameFunction(o: *Object, enum_ty: Type) !Builder.Function.Index { - const zcu = o.module; + const pt = o.pt; + const zcu = pt.zcu; const ip = &zcu.intern_pool; const enum_type = ip.loadEnumType(enum_ty.toIntern()); @@ -4618,7 +4633,7 @@ pub const Object = struct { const return_block = try wip.block(1, "Name"); const this_tag_int_value = try o.lowerValue( - (try zcu.enumValueFieldIndex(enum_ty, @intCast(field_index))).toIntern(), + (try pt.enumValueFieldIndex(enum_ty, @intCast(field_index))).toIntern(), ); try wip_switch.addCase(this_tag_int_value, return_block, &wip); @@ 
-4636,13 +4651,13 @@ pub const Object = struct { pub const DeclGen = struct { object: *Object, - decl: *Module.Decl, + decl: *Zcu.Decl, decl_index: InternPool.DeclIndex, - err_msg: ?*Module.ErrorMsg, + err_msg: ?*Zcu.ErrorMsg, fn ownerModule(dg: DeclGen) *Package.Module { const o = dg.object; - const zcu = o.module; + const zcu = o.pt.zcu; const namespace = zcu.namespacePtr(dg.decl.src_namespace); const file_scope = namespace.fileScope(zcu); return file_scope.mod; @@ -4653,15 +4668,15 @@ pub const DeclGen = struct { assert(dg.err_msg == null); const o = dg.object; const gpa = o.gpa; - const mod = o.module; - const src_loc = dg.decl.navSrcLoc(mod); - dg.err_msg = try Module.ErrorMsg.create(gpa, src_loc, "TODO (LLVM): " ++ format, args); + const src_loc = dg.decl.navSrcLoc(o.pt.zcu); + dg.err_msg = try Zcu.ErrorMsg.create(gpa, src_loc, "TODO (LLVM): " ++ format, args); return error.CodegenFail; } fn genDecl(dg: *DeclGen) !void { const o = dg.object; - const zcu = o.module; + const pt = o.pt; + const zcu = pt.zcu; const ip = &zcu.intern_pool; const decl = dg.decl; const decl_index = dg.decl_index; @@ -4672,7 +4687,7 @@ pub const DeclGen = struct { } else { const variable_index = try o.resolveGlobalDecl(decl_index); variable_index.setAlignment( - decl.getAlignment(zcu).toLlvm(), + decl.getAlignment(pt).toLlvm(), &o.builder, ); if (decl.@"linksection".toSlice(ip)) |section| @@ -4833,23 +4848,21 @@ pub const FuncGen = struct { const gop = try self.func_inst_table.getOrPut(gpa, inst); if (gop.found_existing) return gop.value_ptr.*; - const o = self.dg.object; - const mod = o.module; - const llvm_val = try self.resolveValue((try self.air.value(inst, mod)).?); + const llvm_val = try self.resolveValue((try self.air.value(inst, self.dg.object.pt)).?); gop.value_ptr.* = llvm_val.toValue(); return llvm_val.toValue(); } fn resolveValue(self: *FuncGen, val: Value) Error!Builder.Constant { const o = self.dg.object; - const mod = o.module; - const ty = val.typeOf(mod); + const pt = o.pt; + const ty = val.typeOf(pt.zcu); const llvm_val = try o.lowerValue(val.toIntern()); - if (!isByRef(ty, mod)) return llvm_val; + if (!isByRef(ty, pt)) return llvm_val; // We have an LLVM value but we need to create a global constant and // set the value as its initializer, and then return a pointer to the global. 
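Note on the recurring shape of these hunks: every query that can compute layout or intern a type (`abiAlignment`, `abiSize`, `isByRef`, the `hasRuntimeBits*` family) now receives the `Zcu.PerThread` handle, while plain state reads derive `pt.zcu` once at the top of the function. A minimal toy of that calling convention, with stand-in types (the real ones live in src/Zcu.zig and carry far more state):

    const std = @import("std");

    const Zcu = struct {
        // shared, cross-thread compilation state lives here in reality
        pub const PerThread = struct {
            zcu: *Zcu,
            tid: Id,
            pub const Id = enum(u8) { main, _ };
        };
    };

    // before: fn abiAlignment(ty: Type, mod: *Zcu) Alignment
    // after:  fn abiAlignment(ty: Type, pt: Zcu.PerThread) Alignment
    fn abiAlignmentToy(pt: Zcu.PerThread) u32 {
        _ = pt; // a real implementation may intern types, hence the handle
        return 8;
    }

    pub fn main() void {
        var zcu: Zcu = .{};
        const pt: Zcu.PerThread = .{ .zcu = &zcu, .tid = .main };
        std.debug.print("align = {d}\n", .{abiAlignmentToy(pt)});
    }
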
- const target = mod.getTarget(); + const target = pt.zcu.getTarget(); const variable_index = try o.builder.addVariable( .empty, llvm_val.typeOf(&o.builder), @@ -4859,7 +4872,7 @@ pub const FuncGen = struct { variable_index.setLinkage(.private, &o.builder); variable_index.setMutability(.constant, &o.builder); variable_index.setUnnamedAddr(.unnamed_addr, &o.builder); - variable_index.setAlignment(ty.abiAlignment(mod).toLlvm(), &o.builder); + variable_index.setAlignment(ty.abiAlignment(pt).toLlvm(), &o.builder); return o.builder.convConst( variable_index.toConst(&o.builder), try o.builder.ptrType(toLlvmAddressSpace(.generic, target)), @@ -4868,10 +4881,10 @@ pub const FuncGen = struct { fn resolveNullOptUsize(self: *FuncGen) Error!Builder.Constant { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; if (o.null_opt_usize == .no_init) { - o.null_opt_usize = try self.resolveValue(Value.fromInterned(try mod.intern(.{ .opt = .{ - .ty = try mod.intern(.{ .opt_type = .usize_type }), + o.null_opt_usize = try self.resolveValue(Value.fromInterned(try pt.intern(.{ .opt = .{ + .ty = try pt.intern(.{ .opt_type = .usize_type }), .val = .none, } }))); } @@ -4880,7 +4893,7 @@ pub const FuncGen = struct { fn genBody(self: *FuncGen, body: []const Air.Inst.Index) Error!void { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const ip = &mod.intern_pool; const air_tags = self.air.instructions.items(.tag); for (body, 0..) |inst, i| { @@ -5145,7 +5158,8 @@ pub const FuncGen = struct { if (maybe_inline_func) |inline_func| { const o = self.dg.object; - const zcu = o.module; + const pt = o.pt; + const zcu = pt.zcu; const func = zcu.funcInfo(inline_func); const decl_index = func.owner_decl; @@ -5161,7 +5175,7 @@ pub const FuncGen = struct { const fqn = try decl.fullyQualifiedName(zcu); - const fn_ty = try zcu.funcType(.{ + const fn_ty = try pt.funcType(.{ .param_types = &.{}, .return_type = .void_type, }); @@ -5228,7 +5242,8 @@ pub const FuncGen = struct { const extra = self.air.extraData(Air.Call, pl_op.payload); const args: []const Air.Inst.Ref = @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len]); const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const callee_ty = self.typeOf(pl_op.operand); const zig_fn_ty = switch (callee_ty.zigTypeTag(mod)) { @@ -5240,7 +5255,7 @@ pub const FuncGen = struct { const return_type = Type.fromInterned(fn_info.return_type); const llvm_fn = try self.resolveInst(pl_op.operand); const target = mod.getTarget(); - const sret = firstParamSRet(fn_info, mod, target); + const sret = firstParamSRet(fn_info, pt, target); var llvm_args = std.ArrayList(Builder.Value).init(self.gpa); defer llvm_args.deinit(); @@ -5258,14 +5273,13 @@ pub const FuncGen = struct { const llvm_ret_ty = try o.lowerType(return_type); try attributes.addParamAttr(0, .{ .sret = llvm_ret_ty }, &o.builder); - const alignment = return_type.abiAlignment(mod).toLlvm(); + const alignment = return_type.abiAlignment(pt).toLlvm(); const ret_ptr = try self.buildAllocaWorkaround(return_type, alignment); try llvm_args.append(ret_ptr); break :blk ret_ptr; }; - const err_return_tracing = return_type.isError(mod) and - o.module.comp.config.any_error_tracing; + const err_return_tracing = return_type.isError(mod) and mod.comp.config.any_error_tracing; if (err_return_tracing) { assert(self.err_ret_trace != .none); try llvm_args.append(self.err_ret_trace); @@ -5279,8 +5293,8 @@ pub const FuncGen = struct { const param_ty = 
self.typeOf(arg); const llvm_arg = try self.resolveInst(arg); const llvm_param_ty = try o.lowerType(param_ty); - if (isByRef(param_ty, mod)) { - const alignment = param_ty.abiAlignment(mod).toLlvm(); + if (isByRef(param_ty, pt)) { + const alignment = param_ty.abiAlignment(pt).toLlvm(); const loaded = try self.wip.load(.normal, llvm_param_ty, llvm_arg, alignment, ""); try llvm_args.append(loaded); } else { @@ -5291,10 +5305,10 @@ pub const FuncGen = struct { const arg = args[it.zig_index - 1]; const param_ty = self.typeOf(arg); const llvm_arg = try self.resolveInst(arg); - if (isByRef(param_ty, mod)) { + if (isByRef(param_ty, pt)) { try llvm_args.append(llvm_arg); } else { - const alignment = param_ty.abiAlignment(mod).toLlvm(); + const alignment = param_ty.abiAlignment(pt).toLlvm(); const param_llvm_ty = llvm_arg.typeOfWip(&self.wip); const arg_ptr = try self.buildAlloca(param_llvm_ty, alignment); _ = try self.wip.store(.normal, llvm_arg, arg_ptr, alignment); @@ -5306,10 +5320,10 @@ pub const FuncGen = struct { const param_ty = self.typeOf(arg); const llvm_arg = try self.resolveInst(arg); - const alignment = param_ty.abiAlignment(mod).toLlvm(); + const alignment = param_ty.abiAlignment(pt).toLlvm(); const param_llvm_ty = try o.lowerType(param_ty); const arg_ptr = try self.buildAllocaWorkaround(param_ty, alignment); - if (isByRef(param_ty, mod)) { + if (isByRef(param_ty, pt)) { const loaded = try self.wip.load(.normal, param_llvm_ty, llvm_arg, alignment, ""); _ = try self.wip.store(.normal, loaded, arg_ptr, alignment); } else { @@ -5321,16 +5335,16 @@ pub const FuncGen = struct { const arg = args[it.zig_index - 1]; const param_ty = self.typeOf(arg); const llvm_arg = try self.resolveInst(arg); - const int_llvm_ty = try o.builder.intType(@intCast(param_ty.abiSize(mod) * 8)); + const int_llvm_ty = try o.builder.intType(@intCast(param_ty.abiSize(pt) * 8)); - if (isByRef(param_ty, mod)) { - const alignment = param_ty.abiAlignment(mod).toLlvm(); + if (isByRef(param_ty, pt)) { + const alignment = param_ty.abiAlignment(pt).toLlvm(); const loaded = try self.wip.load(.normal, int_llvm_ty, llvm_arg, alignment, ""); try llvm_args.append(loaded); } else { // LLVM does not allow bitcasting structs so we must allocate // a local, store as one type, and then load as another type. 
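The comment above names a classic workaround for LLVM's rule that `bitcast` cannot operate on aggregates: spill the value to a stack slot as one type, then reload the same bytes as another. The same reinterpretation reduced to plain Zig, as a sketch of the concept rather than the compiler's codegen path:

    const std = @import("std");

    const Pair = extern struct { a: u16, b: u16 };

    pub fn main() void {
        const p: Pair = .{ .a = 1, .b = 2 };
        var buf: [@sizeOf(Pair)]u8 align(@alignOf(Pair)) = undefined;
        @memcpy(&buf, std.mem.asBytes(&p)); // "store as one type"
        const as_int = std.mem.bytesToValue(u32, &buf); // "load as another"
        std.debug.print("0x{x:0>8}\n", .{as_int});
    }
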
- const alignment = param_ty.abiAlignment(mod).toLlvm(); + const alignment = param_ty.abiAlignment(pt).toLlvm(); const int_ptr = try self.buildAllocaWorkaround(param_ty, alignment); _ = try self.wip.store(.normal, llvm_arg, int_ptr, alignment); const loaded = try self.wip.load(.normal, int_llvm_ty, int_ptr, alignment, ""); @@ -5349,9 +5363,9 @@ pub const FuncGen = struct { const param_ty = self.typeOf(arg); const llvm_types = it.types_buffer[0..it.types_len]; const llvm_arg = try self.resolveInst(arg); - const is_by_ref = isByRef(param_ty, mod); + const is_by_ref = isByRef(param_ty, pt); const arg_ptr = if (is_by_ref) llvm_arg else ptr: { - const alignment = param_ty.abiAlignment(mod).toLlvm(); + const alignment = param_ty.abiAlignment(pt).toLlvm(); const ptr = try self.buildAlloca(llvm_arg.typeOfWip(&self.wip), alignment); _ = try self.wip.store(.normal, llvm_arg, ptr, alignment); break :ptr ptr; @@ -5377,8 +5391,8 @@ pub const FuncGen = struct { const arg = args[it.zig_index - 1]; const arg_ty = self.typeOf(arg); var llvm_arg = try self.resolveInst(arg); - const alignment = arg_ty.abiAlignment(mod).toLlvm(); - if (!isByRef(arg_ty, mod)) { + const alignment = arg_ty.abiAlignment(pt).toLlvm(); + if (!isByRef(arg_ty, pt)) { const ptr = try self.buildAlloca(llvm_arg.typeOfWip(&self.wip), alignment); _ = try self.wip.store(.normal, llvm_arg, ptr, alignment); llvm_arg = ptr; @@ -5395,8 +5409,8 @@ pub const FuncGen = struct { const arg = args[it.zig_index - 1]; const arg_ty = self.typeOf(arg); var llvm_arg = try self.resolveInst(arg); - const alignment = arg_ty.abiAlignment(mod).toLlvm(); - if (!isByRef(arg_ty, mod)) { + const alignment = arg_ty.abiAlignment(pt).toLlvm(); + if (!isByRef(arg_ty, pt)) { const ptr = try self.buildAlloca(llvm_arg.typeOfWip(&self.wip), alignment); _ = try self.wip.store(.normal, llvm_arg, ptr, alignment); llvm_arg = ptr; @@ -5418,7 +5432,7 @@ pub const FuncGen = struct { .byval => { const param_index = it.zig_index - 1; const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[param_index]); - if (!isByRef(param_ty, mod)) { + if (!isByRef(param_ty, pt)) { try o.addByValParamAttrs(&attributes, param_ty, param_index, fn_info, it.llvm_index - 1); } }, @@ -5426,7 +5440,7 @@ pub const FuncGen = struct { const param_index = it.zig_index - 1; const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[param_index]); const param_llvm_ty = try o.lowerType(param_ty); - const alignment = param_ty.abiAlignment(mod).toLlvm(); + const alignment = param_ty.abiAlignment(pt).toLlvm(); try o.addByRefParamAttrs(&attributes, it.llvm_index - 1, alignment, it.byval_attr, param_llvm_ty); }, .byref_mut => try attributes.addParamAttr(it.llvm_index - 1, .noundef, &o.builder), @@ -5460,7 +5474,7 @@ pub const FuncGen = struct { const elem_align = (if (ptr_info.flags.alignment != .none) @as(InternPool.Alignment, ptr_info.flags.alignment) else - Type.fromInterned(ptr_info.child).abiAlignment(mod).max(.@"1")).toLlvm(); + Type.fromInterned(ptr_info.child).abiAlignment(pt).max(.@"1")).toLlvm(); try attributes.addParamAttr(llvm_arg_i, .{ .@"align" = elem_align }, &o.builder); }, }; @@ -5485,17 +5499,17 @@ pub const FuncGen = struct { return .none; } - if (self.liveness.isUnused(inst) or !return_type.hasRuntimeBitsIgnoreComptime(mod)) { + if (self.liveness.isUnused(inst) or !return_type.hasRuntimeBitsIgnoreComptime(pt)) { return .none; } const llvm_ret_ty = try o.lowerType(return_type); if (ret_ptr) |rp| { - if (isByRef(return_type, mod)) { + if (isByRef(return_type, pt)) { return rp; } else 
{ // our by-ref status disagrees with sret so we must load. - const return_alignment = return_type.abiAlignment(mod).toLlvm(); + const return_alignment = return_type.abiAlignment(pt).toLlvm(); return self.wip.load(.normal, llvm_ret_ty, rp, return_alignment, ""); } } @@ -5506,19 +5520,19 @@ pub const FuncGen = struct { // In this case the function return type is honoring the calling convention by having // a different LLVM type than the usual one. We solve this here at the callsite // by using our canonical type, then loading it if necessary. - const alignment = return_type.abiAlignment(mod).toLlvm(); + const alignment = return_type.abiAlignment(pt).toLlvm(); const rp = try self.buildAlloca(abi_ret_ty, alignment); _ = try self.wip.store(.normal, call, rp, alignment); - return if (isByRef(return_type, mod)) + return if (isByRef(return_type, pt)) rp else try self.wip.load(.normal, llvm_ret_ty, rp, alignment, ""); } - if (isByRef(return_type, mod)) { + if (isByRef(return_type, pt)) { // our by-ref status disagrees with sret so we must allocate, store, // and return the allocation pointer. - const alignment = return_type.abiAlignment(mod).toLlvm(); + const alignment = return_type.abiAlignment(pt).toLlvm(); const rp = try self.buildAlloca(llvm_ret_ty, alignment); _ = try self.wip.store(.normal, call, rp, alignment); return rp; @@ -5527,9 +5541,9 @@ pub const FuncGen = struct { } } - fn buildSimplePanic(fg: *FuncGen, panic_id: Module.PanicId) !void { + fn buildSimplePanic(fg: *FuncGen, panic_id: Zcu.PanicId) !void { const o = fg.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const msg_decl_index = mod.panic_messages[@intFromEnum(panic_id)].unwrap().?; const msg_decl = mod.declPtr(msg_decl_index); const msg_len = msg_decl.typeOf(mod).childType(mod).arrayLen(mod); @@ -5567,15 +5581,16 @@ pub const FuncGen = struct { fn airRet(self: *FuncGen, inst: Air.Inst.Index, safety: bool) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const ret_ty = self.typeOf(un_op); if (self.ret_ptr != .none) { - const ptr_ty = try mod.singleMutPtrType(ret_ty); + const ptr_ty = try pt.singleMutPtrType(ret_ty); const operand = try self.resolveInst(un_op); - const val_is_undef = if (try self.air.value(un_op, mod)) |val| val.isUndefDeep(mod) else false; + const val_is_undef = if (try self.air.value(un_op, pt)) |val| val.isUndefDeep(mod) else false; if (val_is_undef and safety) undef: { const ptr_info = ptr_ty.ptrInfo(mod); const needs_bitmask = (ptr_info.packed_offset.host_size != 0); @@ -5585,10 +5600,10 @@ pub const FuncGen = struct { // https://github.com/ziglang/zig/issues/15337 break :undef; } - const len = try o.builder.intValue(try o.lowerType(Type.usize), ret_ty.abiSize(mod)); + const len = try o.builder.intValue(try o.lowerType(Type.usize), ret_ty.abiSize(pt)); _ = try self.wip.callMemSet( self.ret_ptr, - ptr_ty.ptrAlignment(mod).toLlvm(), + ptr_ty.ptrAlignment(pt).toLlvm(), try o.builder.intValue(.i8, 0xaa), len, if (ptr_ty.isVolatilePtr(mod)) .@"volatile" else .normal, @@ -5615,7 +5630,7 @@ pub const FuncGen = struct { return .none; } const fn_info = mod.typeToFunc(self.dg.decl.typeOf(mod)).?; - if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt)) { if (Type.fromInterned(fn_info.return_type).isError(mod)) { // Functions with an empty error set are emitted with an error code // return type and return zero so they can be 
function pointers coerced @@ -5629,13 +5644,13 @@ pub const FuncGen = struct { const abi_ret_ty = try lowerFnRetTy(o, fn_info); const operand = try self.resolveInst(un_op); - const val_is_undef = if (try self.air.value(un_op, mod)) |val| val.isUndefDeep(mod) else false; - const alignment = ret_ty.abiAlignment(mod).toLlvm(); + const val_is_undef = if (try self.air.value(un_op, pt)) |val| val.isUndefDeep(mod) else false; + const alignment = ret_ty.abiAlignment(pt).toLlvm(); if (val_is_undef and safety) { const llvm_ret_ty = operand.typeOfWip(&self.wip); const rp = try self.buildAlloca(llvm_ret_ty, alignment); - const len = try o.builder.intValue(try o.lowerType(Type.usize), ret_ty.abiSize(mod)); + const len = try o.builder.intValue(try o.lowerType(Type.usize), ret_ty.abiSize(pt)); _ = try self.wip.callMemSet( rp, alignment, @@ -5651,7 +5666,7 @@ pub const FuncGen = struct { return .none; } - if (isByRef(ret_ty, mod)) { + if (isByRef(ret_ty, pt)) { // operand is a pointer however self.ret_ptr is null so that means // we need to return a value. _ = try self.wip.ret(try self.wip.load(.normal, abi_ret_ty, operand, alignment, "")); @@ -5672,12 +5687,13 @@ pub const FuncGen = struct { fn airRetLoad(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const ptr_ty = self.typeOf(un_op); const ret_ty = ptr_ty.childType(mod); const fn_info = mod.typeToFunc(self.dg.decl.typeOf(mod)).?; - if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt)) { if (Type.fromInterned(fn_info.return_type).isError(mod)) { // Functions with an empty error set are emitted with an error code // return type and return zero so they can be function pointers coerced @@ -5694,7 +5710,7 @@ pub const FuncGen = struct { } const ptr = try self.resolveInst(un_op); const abi_ret_ty = try lowerFnRetTy(o, fn_info); - const alignment = ret_ty.abiAlignment(mod).toLlvm(); + const alignment = ret_ty.abiAlignment(pt).toLlvm(); _ = try self.wip.ret(try self.wip.load(.normal, abi_ret_ty, ptr, alignment, "")); return .none; } @@ -5711,17 +5727,17 @@ pub const FuncGen = struct { fn airCVaCopy(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; + const pt = o.pt; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const src_list = try self.resolveInst(ty_op.operand); const va_list_ty = ty_op.ty.toType(); const llvm_va_list_ty = try o.lowerType(va_list_ty); - const mod = o.module; - const result_alignment = va_list_ty.abiAlignment(mod).toLlvm(); + const result_alignment = va_list_ty.abiAlignment(pt).toLlvm(); const dest_list = try self.buildAllocaWorkaround(va_list_ty, result_alignment); _ = try self.wip.callIntrinsic(.normal, .none, .va_copy, &.{}, &.{ dest_list, src_list }, ""); - return if (isByRef(va_list_ty, mod)) + return if (isByRef(va_list_ty, pt)) dest_list else try self.wip.load(.normal, llvm_va_list_ty, dest_list, result_alignment, ""); @@ -5737,15 +5753,15 @@ pub const FuncGen = struct { fn airCVaStart(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; const va_list_ty = self.typeOfIndex(inst); const llvm_va_list_ty = try o.lowerType(va_list_ty); - const result_alignment = va_list_ty.abiAlignment(mod).toLlvm(); + const result_alignment = va_list_ty.abiAlignment(pt).toLlvm(); const dest_list = try 
self.buildAllocaWorkaround(va_list_ty, result_alignment); _ = try self.wip.callIntrinsic(.normal, .none, .va_start, &.{}, &.{dest_list}, ""); - return if (isByRef(va_list_ty, mod)) + return if (isByRef(va_list_ty, pt)) dest_list else try self.wip.load(.normal, llvm_va_list_ty, dest_list, result_alignment, ""); @@ -5802,21 +5818,22 @@ pub const FuncGen = struct { rhs: Builder.Value, ) Allocator.Error!Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const scalar_ty = operand_ty.scalarType(mod); const int_ty = switch (scalar_ty.zigTypeTag(mod)) { .Enum => scalar_ty.intTagType(mod), .Int, .Bool, .Pointer, .ErrorSet => scalar_ty, .Optional => blk: { const payload_ty = operand_ty.optionalChild(mod); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod) or + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt) or operand_ty.optionalReprIsPayload(mod)) { break :blk operand_ty; } // We need to emit instructions to check for equality/inequality // of optionals that are not pointers. - const is_by_ref = isByRef(scalar_ty, mod); + const is_by_ref = isByRef(scalar_ty, pt); const opt_llvm_ty = try o.lowerType(scalar_ty); const lhs_non_null = try self.optCmpNull(.ne, opt_llvm_ty, lhs, is_by_ref); const rhs_non_null = try self.optCmpNull(.ne, opt_llvm_ty, rhs, is_by_ref); @@ -5908,7 +5925,8 @@ pub const FuncGen = struct { body: []const Air.Inst.Index, ) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const inst_ty = self.typeOfIndex(inst); if (inst_ty.isNoReturn(mod)) { @@ -5916,7 +5934,7 @@ pub const FuncGen = struct { return .none; } - const have_block_result = inst_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod); + const have_block_result = inst_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt); var breaks: BreakList = if (have_block_result) .{ .list = .{} } else .{ .len = 0 }; defer if (have_block_result) breaks.list.deinit(self.gpa); @@ -5940,7 +5958,7 @@ pub const FuncGen = struct { // a pointer to it. LLVM IR allows the call instruction to use function bodies instead // of function pointers, however the phi makes it a runtime value and therefore // the LLVM type has to be wrapped in a pointer. - if (inst_ty.zigTypeTag(mod) == .Fn or isByRef(inst_ty, mod)) { + if (inst_ty.zigTypeTag(mod) == .Fn or isByRef(inst_ty, pt)) { break :ty .ptr; } break :ty raw_llvm_ty; @@ -5958,13 +5976,13 @@ pub const FuncGen = struct { fn airBr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; + const pt = o.pt; const branch = self.air.instructions.items(.data)[@intFromEnum(inst)].br; const block = self.blocks.get(branch.block_inst).?; // Add the values to the lists only if the break provides a value. 
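This phi bookkeeping is also why `airBlock` above wraps the LLVM result type in a pointer for by-ref aggregates and function bodies: a phi merges SSA values, so anything passed by reference flows through it as an address rather than as bytes. A toy of that decision, with a hypothetical helper mirroring `zigTypeTag(mod) == .Fn or isByRef(inst_ty, pt)`:

    const std = @import("std");

    // Sketch only: the real check consults the type and the PerThread handle.
    fn blockResultIsPointer(is_fn_body: bool, is_by_ref: bool) bool {
        return is_fn_body or is_by_ref;
    }

    test "scalar block results stay by value" {
        try std.testing.expect(!blockResultIsPointer(false, false));
        try std.testing.expect(blockResultIsPointer(false, true));
    }
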
const operand_ty = self.typeOf(branch.operand); - const mod = o.module; - if (operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) { + if (operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) { const val = try self.resolveInst(branch.operand); // For the phi node, we need the basic blocks and the values of the @@ -5998,7 +6016,7 @@ pub const FuncGen = struct { fn airTry(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; const inst = body_tail[0]; const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const err_union = try self.resolveInst(pl_op.operand); @@ -6006,14 +6024,14 @@ pub const FuncGen = struct { const body: []const Air.Inst.Index = @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len]); const err_union_ty = self.typeOf(pl_op.operand); const payload_ty = self.typeOfIndex(inst); - const can_elide_load = if (isByRef(payload_ty, mod)) self.canElideLoad(body_tail) else false; + const can_elide_load = if (isByRef(payload_ty, pt)) self.canElideLoad(body_tail) else false; const is_unused = self.liveness.isUnused(inst); return lowerTry(self, err_union, body, err_union_ty, false, can_elide_load, is_unused); } fn airTryPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.TryPtr, ty_pl.payload); const err_union_ptr = try self.resolveInst(extra.data.ptr); @@ -6033,9 +6051,10 @@ pub const FuncGen = struct { is_unused: bool, ) !Builder.Value { const o = fg.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const payload_ty = err_union_ty.errorUnionPayload(mod); - const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(mod); + const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(pt); const err_union_llvm_ty = try o.lowerType(err_union_ty); const error_type = try o.errorIntType(); @@ -6048,8 +6067,8 @@ pub const FuncGen = struct { else err_union; } - const err_field_index = try errUnionErrorOffset(payload_ty, mod); - if (operand_is_ptr or isByRef(err_union_ty, mod)) { + const err_field_index = try errUnionErrorOffset(payload_ty, pt); + if (operand_is_ptr or isByRef(err_union_ty, pt)) { const err_field_ptr = try fg.wip.gepStruct(err_union_llvm_ty, err_union, err_field_index, ""); // TODO add alignment to this load @@ -6077,13 +6096,13 @@ pub const FuncGen = struct { } if (is_unused) return .none; if (!payload_has_bits) return if (operand_is_ptr) err_union else .none; - const offset = try errUnionPayloadOffset(payload_ty, mod); + const offset = try errUnionPayloadOffset(payload_ty, pt); if (operand_is_ptr) { return fg.wip.gepStruct(err_union_llvm_ty, err_union, offset, ""); - } else if (isByRef(err_union_ty, mod)) { + } else if (isByRef(err_union_ty, pt)) { const payload_ptr = try fg.wip.gepStruct(err_union_llvm_ty, err_union, offset, ""); - const payload_alignment = payload_ty.abiAlignment(mod).toLlvm(); - if (isByRef(payload_ty, mod)) { + const payload_alignment = payload_ty.abiAlignment(pt).toLlvm(); + if (isByRef(payload_ty, pt)) { if (can_elide_load) return payload_ptr; @@ -6161,7 +6180,7 @@ pub const FuncGen = struct { fn airLoop(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const loop = 
self.air.extraData(Air.Block, ty_pl.payload); const body: []const Air.Inst.Index = @ptrCast(self.air.extra[loop.end..][0..loop.data.body_len]); @@ -6185,7 +6204,8 @@ pub const FuncGen = struct { fn airArrayToSlice(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand_ty = self.typeOf(ty_op.operand); const array_ty = operand_ty.childType(mod); @@ -6193,7 +6213,7 @@ pub const FuncGen = struct { const len = try o.builder.intValue(llvm_usize, array_ty.arrayLen(mod)); const slice_llvm_ty = try o.lowerType(self.typeOfIndex(inst)); const operand = try self.resolveInst(ty_op.operand); - if (!array_ty.hasRuntimeBitsIgnoreComptime(mod)) + if (!array_ty.hasRuntimeBitsIgnoreComptime(pt)) return self.wip.buildAggregate(slice_llvm_ty, &.{ operand, len }, ""); const ptr = try self.wip.gep(.inbounds, try o.lowerType(array_ty), operand, &.{ try o.builder.intValue(llvm_usize, 0), try o.builder.intValue(llvm_usize, 0), @@ -6203,7 +6223,8 @@ pub const FuncGen = struct { fn airFloatFromInt(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const workaround_operand = try self.resolveInst(ty_op.operand); @@ -6213,7 +6234,7 @@ pub const FuncGen = struct { const operand = o: { // Work around LLVM bug. See https://github.com/ziglang/zig/issues/17381. - const bit_size = operand_scalar_ty.bitSize(mod); + const bit_size = operand_scalar_ty.bitSize(pt); for ([_]u8{ 8, 16, 32, 64, 128 }) |b| { if (bit_size < b) { break :o try self.wip.cast( @@ -6241,7 +6262,7 @@ pub const FuncGen = struct { "", ); - const rt_int_bits = compilerRtIntBits(@intCast(operand_scalar_ty.bitSize(mod))); + const rt_int_bits = compilerRtIntBits(@intCast(operand_scalar_ty.bitSize(pt))); const rt_int_ty = try o.builder.intType(rt_int_bits); var extended = try self.wip.conv( if (is_signed_int) .signed else .unsigned, @@ -6287,7 +6308,8 @@ pub const FuncGen = struct { _ = fast; const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const target = mod.getTarget(); const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; @@ -6309,7 +6331,7 @@ pub const FuncGen = struct { ); } - const rt_int_bits = compilerRtIntBits(@intCast(dest_scalar_ty.bitSize(mod))); + const rt_int_bits = compilerRtIntBits(@intCast(dest_scalar_ty.bitSize(pt))); const ret_ty = try o.builder.intType(rt_int_bits); const libc_ret_ty = if (rt_int_bits == 128 and (target.os.tag == .windows and target.cpu.arch == .x86_64)) b: { // On Windows x86-64, "ti" functions must use Vector(2, u64) instead of the standard @@ -6348,19 +6370,20 @@ pub const FuncGen = struct { fn sliceOrArrayPtr(fg: *FuncGen, ptr: Builder.Value, ty: Type) Allocator.Error!Builder.Value { const o = fg.dg.object; - const mod = o.module; + const mod = o.pt.zcu; return if (ty.isSlice(mod)) fg.wip.extractValue(ptr, &.{0}, "") else ptr; } fn sliceOrArrayLenInBytes(fg: *FuncGen, ptr: Builder.Value, ty: Type) Allocator.Error!Builder.Value { const o = fg.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const llvm_usize = try o.lowerType(Type.usize); switch (ty.ptrSize(mod)) { .Slice => { const len = try fg.wip.extractValue(ptr, &.{1}, ""); const elem_ty = ty.childType(mod); - const abi_size = 
elem_ty.abiSize(mod); + const abi_size = elem_ty.abiSize(pt); if (abi_size == 1) return len; const abi_size_llvm_val = try o.builder.intValue(llvm_usize, abi_size); return fg.wip.bin(.@"mul nuw", len, abi_size_llvm_val, ""); @@ -6368,7 +6391,7 @@ pub const FuncGen = struct { .One => { const array_ty = ty.childType(mod); const elem_ty = array_ty.childType(mod); - const abi_size = elem_ty.abiSize(mod); + const abi_size = elem_ty.abiSize(pt); return o.builder.intValue(llvm_usize, array_ty.arrayLen(mod) * abi_size); }, .Many, .C => unreachable, @@ -6383,7 +6406,7 @@ pub const FuncGen = struct { fn airPtrSliceFieldPtr(self: *FuncGen, inst: Air.Inst.Index, index: c_uint) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const slice_ptr = try self.resolveInst(ty_op.operand); const slice_ptr_ty = self.typeOf(ty_op.operand); @@ -6394,7 +6417,8 @@ pub const FuncGen = struct { fn airSliceElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const inst = body_tail[0]; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const slice_ty = self.typeOf(bin_op.lhs); @@ -6404,11 +6428,11 @@ pub const FuncGen = struct { const llvm_elem_ty = try o.lowerPtrElemTy(elem_ty); const base_ptr = try self.wip.extractValue(slice, &.{0}, ""); const ptr = try self.wip.gep(.inbounds, llvm_elem_ty, base_ptr, &.{index}, ""); - if (isByRef(elem_ty, mod)) { + if (isByRef(elem_ty, pt)) { if (self.canElideLoad(body_tail)) return ptr; - const elem_alignment = elem_ty.abiAlignment(mod).toLlvm(); + const elem_alignment = elem_ty.abiAlignment(pt).toLlvm(); return self.loadByRef(ptr, elem_ty, elem_alignment, .normal); } @@ -6417,7 +6441,7 @@ pub const FuncGen = struct { fn airSliceElemPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const slice_ty = self.typeOf(bin_op.lhs); @@ -6431,7 +6455,8 @@ pub const FuncGen = struct { fn airArrayElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const inst = body_tail[0]; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; @@ -6440,15 +6465,15 @@ pub const FuncGen = struct { const rhs = try self.resolveInst(bin_op.rhs); const array_llvm_ty = try o.lowerType(array_ty); const elem_ty = array_ty.childType(mod); - if (isByRef(array_ty, mod)) { + if (isByRef(array_ty, pt)) { const indices: [2]Builder.Value = .{ try o.builder.intValue(try o.lowerType(Type.usize), 0), rhs, }; - if (isByRef(elem_ty, mod)) { + if (isByRef(elem_ty, pt)) { const elem_ptr = try self.wip.gep(.inbounds, array_llvm_ty, array_llvm_val, &indices, ""); if (canElideLoad(self, body_tail)) return elem_ptr; - const elem_alignment = elem_ty.abiAlignment(mod).toLlvm(); + const elem_alignment = elem_ty.abiAlignment(pt).toLlvm(); return self.loadByRef(elem_ptr, elem_ty, elem_alignment, .normal); } else { const elem_ptr = @@ -6463,7 +6488,8 @@ pub const FuncGen = struct { fn airPtrElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + 
const mod = pt.zcu; const inst = body_tail[0]; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const ptr_ty = self.typeOf(bin_op.lhs); @@ -6477,9 +6503,9 @@ pub const FuncGen = struct { &.{ try o.builder.intValue(try o.lowerType(Type.usize), 0), rhs } else &.{rhs}, ""); - if (isByRef(elem_ty, mod)) { + if (isByRef(elem_ty, pt)) { if (self.canElideLoad(body_tail)) return ptr; - const elem_alignment = elem_ty.abiAlignment(mod).toLlvm(); + const elem_alignment = elem_ty.abiAlignment(pt).toLlvm(); return self.loadByRef(ptr, elem_ty, elem_alignment, .normal); } @@ -6488,12 +6514,13 @@ pub const FuncGen = struct { fn airPtrElemPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const ptr_ty = self.typeOf(bin_op.lhs); const elem_ty = ptr_ty.childType(mod); - if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return self.resolveInst(bin_op.lhs); + if (!elem_ty.hasRuntimeBitsIgnoreComptime(pt)) return self.resolveInst(bin_op.lhs); const base_ptr = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); @@ -6530,7 +6557,8 @@ pub const FuncGen = struct { fn airStructFieldVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const inst = body_tail[0]; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const struct_field = self.air.extraData(Air.StructField, ty_pl.payload).data; @@ -6538,27 +6566,27 @@ pub const FuncGen = struct { const struct_llvm_val = try self.resolveInst(struct_field.struct_operand); const field_index = struct_field.field_index; const field_ty = struct_ty.structFieldType(field_index, mod); - if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) return .none; + if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) return .none; - if (!isByRef(struct_ty, mod)) { - assert(!isByRef(field_ty, mod)); + if (!isByRef(struct_ty, pt)) { + assert(!isByRef(field_ty, pt)); switch (struct_ty.zigTypeTag(mod)) { .Struct => switch (struct_ty.containerLayout(mod)) { .@"packed" => { const struct_type = mod.typeToStruct(struct_ty).?; - const bit_offset = mod.structPackedFieldBitOffset(struct_type, field_index); + const bit_offset = pt.structPackedFieldBitOffset(struct_type, field_index); const containing_int = struct_llvm_val; const shift_amt = try o.builder.intValue(containing_int.typeOfWip(&self.wip), bit_offset); const shifted_value = try self.wip.bin(.lshr, containing_int, shift_amt, ""); const elem_llvm_ty = try o.lowerType(field_ty); if (field_ty.zigTypeTag(mod) == .Float or field_ty.zigTypeTag(mod) == .Vector) { - const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(mod))); + const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(pt))); const truncated_int = try self.wip.cast(.trunc, shifted_value, same_size_int, ""); return self.wip.cast(.bitcast, truncated_int, elem_llvm_ty, ""); } else if (field_ty.isPtrAtRuntime(mod)) { - const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(mod))); + const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(pt))); const truncated_int = try self.wip.cast(.trunc, shifted_value, same_size_int, ""); return self.wip.cast(.inttoptr, truncated_int, elem_llvm_ty, ""); @@ -6575,12 +6603,12 @@ pub const 
FuncGen = struct { const containing_int = struct_llvm_val; const elem_llvm_ty = try o.lowerType(field_ty); if (field_ty.zigTypeTag(mod) == .Float or field_ty.zigTypeTag(mod) == .Vector) { - const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(mod))); + const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(pt))); const truncated_int = try self.wip.cast(.trunc, containing_int, same_size_int, ""); return self.wip.cast(.bitcast, truncated_int, elem_llvm_ty, ""); } else if (field_ty.isPtrAtRuntime(mod)) { - const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(mod))); + const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(pt))); const truncated_int = try self.wip.cast(.trunc, containing_int, same_size_int, ""); return self.wip.cast(.inttoptr, truncated_int, elem_llvm_ty, ""); @@ -6599,12 +6627,12 @@ pub const FuncGen = struct { const llvm_field_index = o.llvmFieldIndex(struct_ty, field_index).?; const field_ptr = try self.wip.gepStruct(struct_llvm_ty, struct_llvm_val, llvm_field_index, ""); - const alignment = struct_ty.structFieldAlign(field_index, mod); - const field_ptr_ty = try mod.ptrType(.{ + const alignment = struct_ty.structFieldAlign(field_index, pt); + const field_ptr_ty = try pt.ptrType(.{ .child = field_ty.toIntern(), .flags = .{ .alignment = alignment }, }); - if (isByRef(field_ty, mod)) { + if (isByRef(field_ty, pt)) { if (canElideLoad(self, body_tail)) return field_ptr; @@ -6617,12 +6645,12 @@ pub const FuncGen = struct { }, .Union => { const union_llvm_ty = try o.lowerType(struct_ty); - const layout = struct_ty.unionGetLayout(mod); + const layout = struct_ty.unionGetLayout(pt); const payload_index = @intFromBool(layout.tag_align.compare(.gte, layout.payload_align)); const field_ptr = try self.wip.gepStruct(union_llvm_ty, struct_llvm_val, payload_index, ""); const payload_alignment = layout.payload_align.toLlvm(); - if (isByRef(field_ty, mod)) { + if (isByRef(field_ty, pt)) { if (canElideLoad(self, body_tail)) return field_ptr; return self.loadByRef(field_ptr, field_ty, payload_alignment, .normal); } else { @@ -6635,14 +6663,15 @@ pub const FuncGen = struct { fn airFieldParentPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data; const field_ptr = try self.resolveInst(extra.field_ptr); const parent_ty = ty_pl.ty.toType().childType(mod); - const field_offset = parent_ty.structFieldOffset(extra.field_index, mod); + const field_offset = parent_ty.structFieldOffset(extra.field_index, pt); if (field_offset == 0) return field_ptr; const res_ty = try o.lowerType(ty_pl.ty.toType()); @@ -6696,7 +6725,7 @@ pub const FuncGen = struct { fn airDbgVarPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const operand = try self.resolveInst(pl_op.operand); const name = self.air.nullTerminatedString(pl_op.payload); @@ -6743,9 +6772,9 @@ pub const FuncGen = struct { try o.lowerDebugType(operand_ty), ); - const zcu = o.module; + const pt = o.pt; const owner_mod = self.dg.ownerModule(); - if (isByRef(operand_ty, zcu)) { + if (isByRef(operand_ty, pt)) { _ = try self.wip.callIntrinsic( .normal, .none, @@ -6759,7 +6788,7 @@ pub const FuncGen = 
struct { "", ); } else if (owner_mod.optimize_mode == .Debug) { - const alignment = operand_ty.abiAlignment(zcu).toLlvm(); + const alignment = operand_ty.abiAlignment(pt).toLlvm(); const alloca = try self.buildAlloca(operand.typeOfWip(&self.wip), alignment); _ = try self.wip.store(.normal, operand, alloca, alignment); _ = try self.wip.callIntrinsic( @@ -6830,7 +6859,8 @@ pub const FuncGen = struct { // This stores whether we need to add an elementtype attribute and // if so, the element type itself. const llvm_param_attrs = try arena.alloc(Builder.Type, max_param_count); - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const target = mod.getTarget(); var llvm_ret_i: usize = 0; @@ -6930,13 +6960,13 @@ pub const FuncGen = struct { const arg_llvm_value = try self.resolveInst(input); const arg_ty = self.typeOf(input); - const is_by_ref = isByRef(arg_ty, mod); + const is_by_ref = isByRef(arg_ty, pt); if (is_by_ref) { if (constraintAllowsMemory(constraint)) { llvm_param_values[llvm_param_i] = arg_llvm_value; llvm_param_types[llvm_param_i] = arg_llvm_value.typeOfWip(&self.wip); } else { - const alignment = arg_ty.abiAlignment(mod).toLlvm(); + const alignment = arg_ty.abiAlignment(pt).toLlvm(); const arg_llvm_ty = try o.lowerType(arg_ty); const load_inst = try self.wip.load(.normal, arg_llvm_ty, arg_llvm_value, alignment, ""); @@ -6948,7 +6978,7 @@ pub const FuncGen = struct { llvm_param_values[llvm_param_i] = arg_llvm_value; llvm_param_types[llvm_param_i] = arg_llvm_value.typeOfWip(&self.wip); } else { - const alignment = arg_ty.abiAlignment(mod).toLlvm(); + const alignment = arg_ty.abiAlignment(pt).toLlvm(); const arg_ptr = try self.buildAlloca(arg_llvm_value.typeOfWip(&self.wip), alignment); _ = try self.wip.store(.normal, arg_llvm_value, arg_ptr, alignment); llvm_param_values[llvm_param_i] = arg_ptr; @@ -7000,7 +7030,7 @@ pub const FuncGen = struct { llvm_param_values[llvm_param_i] = llvm_rw_val; llvm_param_types[llvm_param_i] = llvm_rw_val.typeOfWip(&self.wip); } else { - const alignment = rw_ty.abiAlignment(mod).toLlvm(); + const alignment = rw_ty.abiAlignment(pt).toLlvm(); const loaded = try self.wip.load(.normal, llvm_elem_ty, llvm_rw_val, alignment, ""); llvm_param_values[llvm_param_i] = loaded; llvm_param_types[llvm_param_i] = llvm_elem_ty; @@ -7161,7 +7191,7 @@ pub const FuncGen = struct { const output_ptr = try self.resolveInst(output); const output_ptr_ty = self.typeOf(output); - const alignment = output_ptr_ty.ptrAlignment(mod).toLlvm(); + const alignment = output_ptr_ty.ptrAlignment(pt).toLlvm(); _ = try self.wip.store(.normal, output_value, output_ptr, alignment); } else { ret_val = output_value; @@ -7179,7 +7209,8 @@ pub const FuncGen = struct { cond: Builder.IntegerCondition, ) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const operand = try self.resolveInst(un_op); const operand_ty = self.typeOf(un_op); @@ -7204,7 +7235,7 @@ pub const FuncGen = struct { comptime assert(optional_layout_version == 3); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { const loaded = if (operand_is_ptr) try self.wip.load(.normal, optional_llvm_ty, operand, .default, "") else @@ -7212,7 +7243,7 @@ pub const FuncGen = struct { return self.wip.icmp(cond, loaded, try o.builder.intValue(.i8, 0), ""); } - const is_by_ref = operand_is_ptr or isByRef(optional_ty, mod); + const is_by_ref = 
operand_is_ptr or isByRef(optional_ty, pt); return self.optCmpNull(cond, optional_llvm_ty, operand, is_by_ref); } @@ -7223,7 +7254,8 @@ pub const FuncGen = struct { operand_is_ptr: bool, ) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const operand = try self.resolveInst(un_op); const operand_ty = self.typeOf(un_op); @@ -7241,7 +7273,7 @@ pub const FuncGen = struct { return val.toValue(); } - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { const loaded = if (operand_is_ptr) try self.wip.load(.normal, try o.lowerType(err_union_ty), operand, .default, "") else @@ -7249,9 +7281,9 @@ pub const FuncGen = struct { return self.wip.icmp(cond, loaded, zero, ""); } - const err_field_index = try errUnionErrorOffset(payload_ty, mod); + const err_field_index = try errUnionErrorOffset(payload_ty, pt); - const loaded = if (operand_is_ptr or isByRef(err_union_ty, mod)) loaded: { + const loaded = if (operand_is_ptr or isByRef(err_union_ty, pt)) loaded: { const err_union_llvm_ty = try o.lowerType(err_union_ty); const err_field_ptr = try self.wip.gepStruct(err_union_llvm_ty, operand, err_field_index, ""); @@ -7262,12 +7294,13 @@ pub const FuncGen = struct { fn airOptionalPayloadPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try self.resolveInst(ty_op.operand); const optional_ty = self.typeOf(ty_op.operand).childType(mod); const payload_ty = optional_ty.optionalChild(mod); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { // We have a pointer to a zero-bit value and we need to return // a pointer to a zero-bit value. return operand; @@ -7283,13 +7316,14 @@ pub const FuncGen = struct { comptime assert(optional_layout_version == 3); const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try self.resolveInst(ty_op.operand); const optional_ty = self.typeOf(ty_op.operand).childType(mod); const payload_ty = optional_ty.optionalChild(mod); const non_null_bit = try o.builder.intValue(.i8, 1); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { // We have a pointer to a i8. We need to set it to 1 and then return the same pointer. _ = try self.wip.store(.normal, non_null_bit, operand, .default); return operand; @@ -7314,13 +7348,14 @@ pub const FuncGen = struct { fn airOptionalPayload(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const inst = body_tail[0]; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try self.resolveInst(ty_op.operand); const optional_ty = self.typeOf(ty_op.operand); const payload_ty = self.typeOfIndex(inst); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return .none; + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) return .none; if (optional_ty.optionalReprIsPayload(mod)) { // Payload value is the same as the optional value. 
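`optionalReprIsPayload` is what licenses that early return: pointer-like optionals carry no separate flag, because the null pointer itself encodes "no value", while every other optional lowered here is `{ payload, i8 non_null }` with the payload at field 0 and the flag at field 1 (the `comptime assert(optional_layout_version == 3)` nearby pins this layout). Plain Zig exhibits the same split:

    const std = @import("std");

    pub fn main() void {
        // pointer-like: the representation is exactly the payload
        std.debug.print("?*u8 = {d}, *u8 = {d}\n", .{ @sizeOf(?*u8), @sizeOf(*u8) });
        // otherwise: payload plus an i8 non-null flag, padded to alignment
        std.debug.print("?u32 = {d}, u32 = {d}\n", .{ @sizeOf(?u32), @sizeOf(u32) });
    }
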
@@ -7328,7 +7363,7 @@ pub const FuncGen = struct { } const opt_llvm_ty = try o.lowerType(optional_ty); - const can_elide_load = if (isByRef(payload_ty, mod)) self.canElideLoad(body_tail) else false; + const can_elide_load = if (isByRef(payload_ty, pt)) self.canElideLoad(body_tail) else false; return self.optPayloadHandle(opt_llvm_ty, operand, optional_ty, can_elide_load); } @@ -7338,7 +7373,8 @@ pub const FuncGen = struct { operand_is_ptr: bool, ) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const inst = body_tail[0]; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try self.resolveInst(ty_op.operand); @@ -7347,17 +7383,17 @@ pub const FuncGen = struct { const result_ty = self.typeOfIndex(inst); const payload_ty = if (operand_is_ptr) result_ty.childType(mod) else result_ty; - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { return if (operand_is_ptr) operand else .none; } - const offset = try errUnionPayloadOffset(payload_ty, mod); + const offset = try errUnionPayloadOffset(payload_ty, pt); const err_union_llvm_ty = try o.lowerType(err_union_ty); if (operand_is_ptr) { return self.wip.gepStruct(err_union_llvm_ty, operand, offset, ""); - } else if (isByRef(err_union_ty, mod)) { - const payload_alignment = payload_ty.abiAlignment(mod).toLlvm(); + } else if (isByRef(err_union_ty, pt)) { + const payload_alignment = payload_ty.abiAlignment(pt).toLlvm(); const payload_ptr = try self.wip.gepStruct(err_union_llvm_ty, operand, offset, ""); - if (isByRef(payload_ty, mod)) { + if (isByRef(payload_ty, pt)) { if (self.canElideLoad(body_tail)) return payload_ptr; return self.loadByRef(payload_ptr, payload_ty, payload_alignment, .normal); } @@ -7373,7 +7409,8 @@ pub const FuncGen = struct { operand_is_ptr: bool, ) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try self.resolveInst(ty_op.operand); const operand_ty = self.typeOf(ty_op.operand); @@ -7388,14 +7425,14 @@ pub const FuncGen = struct { } const payload_ty = err_union_ty.errorUnionPayload(mod); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { if (!operand_is_ptr) return operand; return self.wip.load(.normal, error_type, operand, .default, ""); } - const offset = try errUnionErrorOffset(payload_ty, mod); + const offset = try errUnionErrorOffset(payload_ty, pt); - if (operand_is_ptr or isByRef(err_union_ty, mod)) { + if (operand_is_ptr or isByRef(err_union_ty, pt)) { const err_union_llvm_ty = try o.lowerType(err_union_ty); const err_field_ptr = try self.wip.gepStruct(err_union_llvm_ty, operand, offset, ""); return self.wip.load(.normal, error_type, err_field_ptr, .default, ""); @@ -7406,22 +7443,23 @@ pub const FuncGen = struct { fn airErrUnionPayloadPtrSet(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try self.resolveInst(ty_op.operand); const err_union_ty = self.typeOf(ty_op.operand).childType(mod); const payload_ty = err_union_ty.errorUnionPayload(mod); const non_error_val = try o.builder.intValue(try o.errorIntType(), 0); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if 
(!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { _ = try self.wip.store(.normal, non_error_val, operand, .default); return operand; } const err_union_llvm_ty = try o.lowerType(err_union_ty); { - const err_int_ty = try mod.errorIntType(); - const error_alignment = err_int_ty.abiAlignment(mod).toLlvm(); - const error_offset = try errUnionErrorOffset(payload_ty, mod); + const err_int_ty = try pt.errorIntType(); + const error_alignment = err_int_ty.abiAlignment(pt).toLlvm(); + const error_offset = try errUnionErrorOffset(payload_ty, pt); // First set the non-error value. const non_null_ptr = try self.wip.gepStruct(err_union_llvm_ty, operand, error_offset, ""); _ = try self.wip.store(.normal, non_error_val, non_null_ptr, error_alignment); @@ -7429,7 +7467,7 @@ pub const FuncGen = struct { // Then return the payload pointer (only if it is used). if (self.liveness.isUnused(inst)) return .none; - const payload_offset = try errUnionPayloadOffset(payload_ty, mod); + const payload_offset = try errUnionPayloadOffset(payload_ty, pt); return self.wip.gepStruct(err_union_llvm_ty, operand, payload_offset, ""); } @@ -7446,19 +7484,21 @@ pub const FuncGen = struct { fn airSaveErrReturnTraceIndex(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; + const pt = o.pt; + const mod = pt.zcu; + const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const struct_ty = ty_pl.ty.toType(); const field_index = ty_pl.payload; - const mod = o.module; const struct_llvm_ty = try o.lowerType(struct_ty); const llvm_field_index = o.llvmFieldIndex(struct_ty, field_index).?; assert(self.err_ret_trace != .none); const field_ptr = try self.wip.gepStruct(struct_llvm_ty, self.err_ret_trace, llvm_field_index, ""); - const field_alignment = struct_ty.structFieldAlign(field_index, mod); + const field_alignment = struct_ty.structFieldAlign(field_index, pt); const field_ty = struct_ty.structFieldType(field_index, mod); - const field_ptr_ty = try mod.ptrType(.{ + const field_ptr_ty = try pt.ptrType(.{ .child = field_ty.toIntern(), .flags = .{ .alignment = field_alignment }, }); @@ -7490,29 +7530,30 @@ pub const FuncGen = struct { fn airWrapOptional(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const inst = body_tail[0]; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const payload_ty = self.typeOf(ty_op.operand); const non_null_bit = try o.builder.intValue(.i8, 1); comptime assert(optional_layout_version == 3); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return non_null_bit; + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) return non_null_bit; const operand = try self.resolveInst(ty_op.operand); const optional_ty = self.typeOfIndex(inst); if (optional_ty.optionalReprIsPayload(mod)) return operand; const llvm_optional_ty = try o.lowerType(optional_ty); - if (isByRef(optional_ty, mod)) { + if (isByRef(optional_ty, pt)) { const directReturn = self.isNextRet(body_tail); const optional_ptr = if (directReturn) self.ret_ptr else brk: { - const alignment = optional_ty.abiAlignment(mod).toLlvm(); + const alignment = optional_ty.abiAlignment(pt).toLlvm(); const optional_ptr = try self.buildAllocaWorkaround(optional_ty, alignment); break :brk optional_ptr; }; const payload_ptr = try self.wip.gepStruct(llvm_optional_ty, optional_ptr, 0, ""); - const payload_ptr_ty = try mod.singleMutPtrType(payload_ty); + const payload_ptr_ty = try 
pt.singleMutPtrType(payload_ty); try self.store(payload_ptr, payload_ptr_ty, operand, .none); const non_null_ptr = try self.wip.gepStruct(llvm_optional_ty, optional_ptr, 1, ""); _ = try self.wip.store(.normal, non_null_bit, non_null_ptr, .default); @@ -7523,36 +7564,36 @@ pub const FuncGen = struct { fn airWrapErrUnionPayload(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; const inst = body_tail[0]; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const err_un_ty = self.typeOfIndex(inst); const operand = try self.resolveInst(ty_op.operand); const payload_ty = self.typeOf(ty_op.operand); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { return operand; } const ok_err_code = try o.builder.intValue(try o.errorIntType(), 0); const err_un_llvm_ty = try o.lowerType(err_un_ty); - const payload_offset = try errUnionPayloadOffset(payload_ty, mod); - const error_offset = try errUnionErrorOffset(payload_ty, mod); - if (isByRef(err_un_ty, mod)) { + const payload_offset = try errUnionPayloadOffset(payload_ty, pt); + const error_offset = try errUnionErrorOffset(payload_ty, pt); + if (isByRef(err_un_ty, pt)) { const directReturn = self.isNextRet(body_tail); const result_ptr = if (directReturn) self.ret_ptr else brk: { - const alignment = err_un_ty.abiAlignment(mod).toLlvm(); + const alignment = err_un_ty.abiAlignment(pt).toLlvm(); const result_ptr = try self.buildAllocaWorkaround(err_un_ty, alignment); break :brk result_ptr; }; const err_ptr = try self.wip.gepStruct(err_un_llvm_ty, result_ptr, error_offset, ""); - const err_int_ty = try mod.errorIntType(); - const error_alignment = err_int_ty.abiAlignment(mod).toLlvm(); + const err_int_ty = try pt.errorIntType(); + const error_alignment = err_int_ty.abiAlignment(pt).toLlvm(); _ = try self.wip.store(.normal, ok_err_code, err_ptr, error_alignment); const payload_ptr = try self.wip.gepStruct(err_un_llvm_ty, result_ptr, payload_offset, ""); - const payload_ptr_ty = try mod.singleMutPtrType(payload_ty); + const payload_ptr_ty = try pt.singleMutPtrType(payload_ty); try self.store(payload_ptr, payload_ptr_ty, operand, .none); return result_ptr; } @@ -7564,33 +7605,34 @@ pub const FuncGen = struct { fn airWrapErrUnionErr(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const inst = body_tail[0]; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const err_un_ty = self.typeOfIndex(inst); const payload_ty = err_un_ty.errorUnionPayload(mod); const operand = try self.resolveInst(ty_op.operand); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return operand; + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) return operand; const err_un_llvm_ty = try o.lowerType(err_un_ty); - const payload_offset = try errUnionPayloadOffset(payload_ty, mod); - const error_offset = try errUnionErrorOffset(payload_ty, mod); - if (isByRef(err_un_ty, mod)) { + const payload_offset = try errUnionPayloadOffset(payload_ty, pt); + const error_offset = try errUnionErrorOffset(payload_ty, pt); + if (isByRef(err_un_ty, pt)) { const directReturn = self.isNextRet(body_tail); const result_ptr = if (directReturn) self.ret_ptr else brk: { - const alignment = err_un_ty.abiAlignment(mod).toLlvm(); + const alignment = err_un_ty.abiAlignment(pt).toLlvm(); const result_ptr = try 
self.buildAllocaWorkaround(err_un_ty, alignment); break :brk result_ptr; }; const err_ptr = try self.wip.gepStruct(err_un_llvm_ty, result_ptr, error_offset, ""); - const err_int_ty = try mod.errorIntType(); - const error_alignment = err_int_ty.abiAlignment(mod).toLlvm(); + const err_int_ty = try pt.errorIntType(); + const error_alignment = err_int_ty.abiAlignment(pt).toLlvm(); _ = try self.wip.store(.normal, operand, err_ptr, error_alignment); const payload_ptr = try self.wip.gepStruct(err_un_llvm_ty, result_ptr, payload_offset, ""); - const payload_ptr_ty = try mod.singleMutPtrType(payload_ty); + const payload_ptr_ty = try pt.singleMutPtrType(payload_ty); // TODO store undef to payload_ptr _ = payload_ptr; _ = payload_ptr_ty; @@ -7624,7 +7666,8 @@ pub const FuncGen = struct { fn airVectorStoreElem(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const data = self.air.instructions.items(.data)[@intFromEnum(inst)].vector_store_elem; const extra = self.air.extraData(Air.Bin, data.payload).data; @@ -7636,7 +7679,7 @@ pub const FuncGen = struct { const access_kind: Builder.MemoryAccessKind = if (vector_ptr_ty.isVolatilePtr(mod)) .@"volatile" else .normal; const elem_llvm_ty = try o.lowerType(vector_ptr_ty.childType(mod)); - const alignment = vector_ptr_ty.ptrAlignment(mod).toLlvm(); + const alignment = vector_ptr_ty.ptrAlignment(pt).toLlvm(); const loaded = try self.wip.load(access_kind, elem_llvm_ty, vector_ptr, alignment, ""); const new_vector = try self.wip.insertElement(loaded, operand, index, ""); @@ -7646,7 +7689,7 @@ pub const FuncGen = struct { fn airMin(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); @@ -7666,7 +7709,7 @@ pub const FuncGen = struct { fn airMax(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); @@ -7696,7 +7739,7 @@ pub const FuncGen = struct { fn airAdd(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); @@ -7714,7 +7757,7 @@ pub const FuncGen = struct { unsigned_intrinsic: Builder.Intrinsic, ) !Builder.Value { const o = fg.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const bin_op = fg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try fg.resolveInst(bin_op.lhs); @@ -7762,7 +7805,7 @@ pub const FuncGen = struct { fn airAddSat(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); @@ -7782,7 +7825,7 @@ pub const FuncGen = struct { fn airSub(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value { const o = 
self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); @@ -7803,7 +7846,7 @@ pub const FuncGen = struct { fn airSubSat(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); @@ -7823,7 +7866,7 @@ pub const FuncGen = struct { fn airMul(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); @@ -7844,7 +7887,7 @@ pub const FuncGen = struct { fn airMulSat(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); @@ -7873,7 +7916,7 @@ pub const FuncGen = struct { fn airDivTrunc(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); @@ -7889,7 +7932,7 @@ pub const FuncGen = struct { fn airDivFloor(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); @@ -7921,7 +7964,7 @@ pub const FuncGen = struct { fn airDivExact(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); @@ -7939,7 +7982,7 @@ pub const FuncGen = struct { fn airRem(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); @@ -7956,7 +7999,7 @@ pub const FuncGen = struct { fn airMod(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); @@ -7992,7 +8035,7 @@ pub const FuncGen = struct { fn airPtrAdd(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const ty_pl = 
self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const ptr = try self.resolveInst(bin_op.lhs); @@ -8014,7 +8057,7 @@ pub const FuncGen = struct { fn airPtrSub(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const ptr = try self.resolveInst(bin_op.lhs); @@ -8042,7 +8085,8 @@ pub const FuncGen = struct { unsigned_intrinsic: Builder.Intrinsic, ) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; @@ -8065,8 +8109,8 @@ pub const FuncGen = struct { const result_index = o.llvmFieldIndex(inst_ty, 0).?; const overflow_index = o.llvmFieldIndex(inst_ty, 1).?; - if (isByRef(inst_ty, mod)) { - const result_alignment = inst_ty.abiAlignment(mod).toLlvm(); + if (isByRef(inst_ty, pt)) { + const result_alignment = inst_ty.abiAlignment(pt).toLlvm(); const alloca_inst = try self.buildAllocaWorkaround(inst_ty, result_alignment); { const field_ptr = try self.wip.gepStruct(llvm_inst_ty, alloca_inst, result_index, ""); @@ -8135,7 +8179,7 @@ pub const FuncGen = struct { return o.builder.addFunction( try o.builder.fnType(return_type, param_types, .normal), fn_name, - toLlvmAddressSpace(.generic, o.module.getTarget()), + toLlvmAddressSpace(.generic, o.pt.zcu.getTarget()), ); } @@ -8149,8 +8193,8 @@ pub const FuncGen = struct { params: [2]Builder.Value, ) !Builder.Value { const o = self.dg.object; - const mod = o.module; - const target = o.module.getTarget(); + const mod = o.pt.zcu; + const target = mod.getTarget(); const scalar_ty = ty.scalarType(mod); const scalar_llvm_ty = try o.lowerType(scalar_ty); @@ -8255,7 +8299,7 @@ pub const FuncGen = struct { params: [params_len]Builder.Value, ) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const target = mod.getTarget(); const scalar_ty = ty.scalarType(mod); const llvm_ty = try o.lowerType(ty); @@ -8396,7 +8440,8 @@ pub const FuncGen = struct { fn airShlWithOverflow(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; @@ -8422,8 +8467,8 @@ pub const FuncGen = struct { const result_index = o.llvmFieldIndex(dest_ty, 0).?; const overflow_index = o.llvmFieldIndex(dest_ty, 1).?; - if (isByRef(dest_ty, mod)) { - const result_alignment = dest_ty.abiAlignment(mod).toLlvm(); + if (isByRef(dest_ty, pt)) { + const result_alignment = dest_ty.abiAlignment(pt).toLlvm(); const alloca_inst = try self.buildAllocaWorkaround(dest_ty, result_alignment); { const field_ptr = try self.wip.gepStruct(llvm_dest_ty, alloca_inst, result_index, ""); @@ -8466,7 +8511,7 @@ pub const FuncGen = struct { fn airShlExact(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); @@ -8497,7 +8542,8 @@ pub const FuncGen = struct { fn airShlSat(self: *FuncGen, 
inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); @@ -8505,7 +8551,7 @@ pub const FuncGen = struct { const lhs_ty = self.typeOf(bin_op.lhs); const lhs_scalar_ty = lhs_ty.scalarType(mod); - const lhs_bits = lhs_scalar_ty.bitSize(mod); + const lhs_bits = lhs_scalar_ty.bitSize(pt); const casted_rhs = try self.wip.conv(.unsigned, rhs, try o.lowerType(lhs_ty), ""); @@ -8539,7 +8585,7 @@ pub const FuncGen = struct { fn airShr(self: *FuncGen, inst: Air.Inst.Index, is_exact: bool) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); @@ -8558,7 +8604,7 @@ pub const FuncGen = struct { fn airAbs(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try self.resolveInst(ty_op.operand); const operand_ty = self.typeOf(ty_op.operand); @@ -8580,7 +8626,7 @@ pub const FuncGen = struct { fn airIntCast(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const dest_ty = self.typeOfIndex(inst); const dest_llvm_ty = try o.lowerType(dest_ty); @@ -8604,7 +8650,7 @@ pub const FuncGen = struct { fn airFptrunc(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try self.resolveInst(ty_op.operand); const operand_ty = self.typeOf(ty_op.operand); @@ -8638,7 +8684,7 @@ pub const FuncGen = struct { fn airFpext(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try self.resolveInst(ty_op.operand); const operand_ty = self.typeOf(ty_op.operand); @@ -8696,9 +8742,10 @@ pub const FuncGen = struct { fn bitCast(self: *FuncGen, operand: Builder.Value, operand_ty: Type, inst_ty: Type) !Builder.Value { const o = self.dg.object; - const mod = o.module; - const operand_is_ref = isByRef(operand_ty, mod); - const result_is_ref = isByRef(inst_ty, mod); + const pt = o.pt; + const mod = pt.zcu; + const operand_is_ref = isByRef(operand_ty, pt); + const result_is_ref = isByRef(inst_ty, pt); const llvm_dest_ty = try o.lowerType(inst_ty); if (operand_is_ref and result_is_ref) { @@ -8721,9 +8768,9 @@ pub const FuncGen = struct { if (!result_is_ref) { return self.dg.todo("implement bitcast vector to non-ref array", .{}); } - const alignment = inst_ty.abiAlignment(mod).toLlvm(); + const alignment = inst_ty.abiAlignment(pt).toLlvm(); const array_ptr = try self.buildAllocaWorkaround(inst_ty, alignment); - const bitcast_ok = elem_ty.bitSize(mod) == elem_ty.abiSize(mod) * 8; + const bitcast_ok = elem_ty.bitSize(pt) == elem_ty.abiSize(pt) * 8; if (bitcast_ok) { _ = try self.wip.store(.normal, operand, array_ptr, alignment); } else { @@ -8748,11 +8795,11 @@ pub const FuncGen = struct { const llvm_vector_ty = try o.lowerType(inst_ty); if 
(!operand_is_ref) return self.dg.todo("implement bitcast non-ref array to vector", .{}); - const bitcast_ok = elem_ty.bitSize(mod) == elem_ty.abiSize(mod) * 8; + const bitcast_ok = elem_ty.bitSize(pt) == elem_ty.abiSize(pt) * 8; if (bitcast_ok) { // The array is aligned to the element's alignment, while the vector might have a completely // different alignment. This means we need to enforce the alignment of this load. - const alignment = elem_ty.abiAlignment(mod).toLlvm(); + const alignment = elem_ty.abiAlignment(pt).toLlvm(); return self.wip.load(.normal, llvm_vector_ty, operand, alignment, ""); } else { // If the ABI size of the element type is not evenly divisible by size in bits; @@ -8777,24 +8824,25 @@ pub const FuncGen = struct { } if (operand_is_ref) { - const alignment = operand_ty.abiAlignment(mod).toLlvm(); + const alignment = operand_ty.abiAlignment(pt).toLlvm(); return self.wip.load(.normal, llvm_dest_ty, operand, alignment, ""); } if (result_is_ref) { - const alignment = operand_ty.abiAlignment(mod).max(inst_ty.abiAlignment(mod)).toLlvm(); + const alignment = operand_ty.abiAlignment(pt).max(inst_ty.abiAlignment(pt)).toLlvm(); const result_ptr = try self.buildAllocaWorkaround(inst_ty, alignment); _ = try self.wip.store(.normal, operand, result_ptr, alignment); return result_ptr; } if (llvm_dest_ty.isStruct(&o.builder) or - ((operand_ty.zigTypeTag(mod) == .Vector or inst_ty.zigTypeTag(mod) == .Vector) and operand_ty.bitSize(mod) != inst_ty.bitSize(mod))) + ((operand_ty.zigTypeTag(mod) == .Vector or inst_ty.zigTypeTag(mod) == .Vector) and + operand_ty.bitSize(pt) != inst_ty.bitSize(pt))) { // Both our operand and our result are values, not pointers, // but LLVM won't let us bitcast struct values or vectors with padding bits. // Therefore, we store operand to alloca, then load for result. 
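            // Editor's sketch (illustrative, not part of the patch): LLVM's `bitcast`
            // is only defined on non-aggregate first-class values of equal bit width,
            // so these cases must be reinterpreted through memory. Under that
            // assumption the roundtrip reduces to:
            //   const tmp = try self.buildAllocaWorkaround(inst_ty, alignment);
            //   _ = try self.wip.store(.normal, operand, tmp, alignment);
            //   return self.wip.load(.normal, llvm_dest_ty, tmp, alignment, "");
            // which is the shape of the code that follows.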
- const alignment = operand_ty.abiAlignment(mod).max(inst_ty.abiAlignment(mod)).toLlvm(); + const alignment = operand_ty.abiAlignment(pt).max(inst_ty.abiAlignment(pt)).toLlvm(); const result_ptr = try self.buildAllocaWorkaround(inst_ty, alignment); _ = try self.wip.store(.normal, operand, result_ptr, alignment); return self.wip.load(.normal, llvm_dest_ty, result_ptr, alignment, ""); @@ -8811,7 +8859,8 @@ pub const FuncGen = struct { fn airArg(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const arg_val = self.args[self.arg_index]; self.arg_index += 1; @@ -8847,7 +8896,7 @@ pub const FuncGen = struct { }; const owner_mod = self.dg.ownerModule(); - if (isByRef(inst_ty, mod)) { + if (isByRef(inst_ty, pt)) { _ = try self.wip.callIntrinsic( .normal, .none, @@ -8861,7 +8910,7 @@ pub const FuncGen = struct { "", ); } else if (owner_mod.optimize_mode == .Debug) { - const alignment = inst_ty.abiAlignment(mod).toLlvm(); + const alignment = inst_ty.abiAlignment(pt).toLlvm(); const alloca = try self.buildAlloca(arg_val.typeOfWip(&self.wip), alignment); _ = try self.wip.store(.normal, arg_val, alloca, alignment); _ = try self.wip.callIntrinsic( @@ -8897,27 +8946,29 @@ pub const FuncGen = struct { fn airAlloc(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ptr_ty = self.typeOfIndex(inst); const pointee_type = ptr_ty.childType(mod); - if (!pointee_type.isFnOrHasRuntimeBitsIgnoreComptime(mod)) + if (!pointee_type.isFnOrHasRuntimeBitsIgnoreComptime(pt)) return (try o.lowerPtrToVoid(ptr_ty)).toValue(); //const pointee_llvm_ty = try o.lowerType(pointee_type); - const alignment = ptr_ty.ptrAlignment(mod).toLlvm(); + const alignment = ptr_ty.ptrAlignment(pt).toLlvm(); return self.buildAllocaWorkaround(pointee_type, alignment); } fn airRetPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ptr_ty = self.typeOfIndex(inst); const ret_ty = ptr_ty.childType(mod); - if (!ret_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) + if (!ret_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) return (try o.lowerPtrToVoid(ptr_ty)).toValue(); if (self.ret_ptr != .none) return self.ret_ptr; //const ret_llvm_ty = try o.lowerType(ret_ty); - const alignment = ptr_ty.ptrAlignment(mod).toLlvm(); + const alignment = ptr_ty.ptrAlignment(pt).toLlvm(); return self.buildAllocaWorkaround(ret_ty, alignment); } @@ -8928,7 +8979,7 @@ pub const FuncGen = struct { llvm_ty: Builder.Type, alignment: Builder.Alignment, ) Allocator.Error!Builder.Value { - const target = self.dg.object.module.getTarget(); + const target = self.dg.object.pt.zcu.getTarget(); return buildAllocaInner(&self.wip, llvm_ty, alignment, target); } @@ -8939,18 +8990,19 @@ pub const FuncGen = struct { alignment: Builder.Alignment, ) Allocator.Error!Builder.Value { const o = self.dg.object; - return self.buildAlloca(try o.builder.arrayType(ty.abiSize(o.module), .i8), alignment); + return self.buildAlloca(try o.builder.arrayType(ty.abiSize(o.pt), .i8), alignment); } fn airStore(self: *FuncGen, inst: Air.Inst.Index, safety: bool) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const dest_ptr = try self.resolveInst(bin_op.lhs); const ptr_ty = 
self.typeOf(bin_op.lhs); const operand_ty = ptr_ty.childType(mod); - const val_is_undef = if (try self.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep(mod) else false; + const val_is_undef = if (try self.air.value(bin_op.rhs, pt)) |val| val.isUndefDeep(mod) else false; if (val_is_undef) { const ptr_info = ptr_ty.ptrInfo(mod); const needs_bitmask = (ptr_info.packed_offset.host_size != 0); @@ -8964,10 +9016,10 @@ pub const FuncGen = struct { // Even if safety is disabled, we still emit a memset to undefined since it conveys // extra information to LLVM. However, safety makes the difference between using // 0xaa or actual undefined for the fill byte. - const len = try o.builder.intValue(try o.lowerType(Type.usize), operand_ty.abiSize(mod)); + const len = try o.builder.intValue(try o.lowerType(Type.usize), operand_ty.abiSize(pt)); _ = try self.wip.callMemSet( dest_ptr, - ptr_ty.ptrAlignment(mod).toLlvm(), + ptr_ty.ptrAlignment(pt).toLlvm(), if (safety) try o.builder.intValue(.i8, 0xaa) else try o.builder.undefValue(.i8), len, if (ptr_ty.isVolatilePtr(mod)) .@"volatile" else .normal, @@ -8992,7 +9044,7 @@ pub const FuncGen = struct { /// The first instruction of `body_tail` is the one whose copy we want to elide. fn canElideLoad(fg: *FuncGen, body_tail: []const Air.Inst.Index) bool { const o = fg.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const ip = &mod.intern_pool; for (body_tail[1..]) |body_inst| { switch (fg.liveness.categorizeOperand(fg.air, body_inst, body_tail[0], ip)) { @@ -9008,7 +9060,8 @@ pub const FuncGen = struct { fn airLoad(fg: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value { const o = fg.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const inst = body_tail[0]; const ty_op = fg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const ptr_ty = fg.typeOf(ty_op.operand); @@ -9016,7 +9069,7 @@ pub const FuncGen = struct { const ptr = try fg.resolveInst(ty_op.operand); elide: { - if (!isByRef(Type.fromInterned(ptr_info.child), mod)) break :elide; + if (!isByRef(Type.fromInterned(ptr_info.child), pt)) break :elide; if (!canElideLoad(fg, body_tail)) break :elide; return ptr; } @@ -9040,7 +9093,7 @@ pub const FuncGen = struct { _ = inst; const o = self.dg.object; const llvm_usize = try o.lowerType(Type.usize); - if (!target_util.supportsReturnAddress(o.module.getTarget())) { + if (!target_util.supportsReturnAddress(o.pt.zcu.getTarget())) { // https://github.com/ziglang/zig/issues/11946 return o.builder.intValue(llvm_usize, 0); } @@ -9068,7 +9121,8 @@ pub const FuncGen = struct { kind: Builder.Function.Instruction.CmpXchg.Kind, ) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.Cmpxchg, ty_pl.payload).data; const ptr = try self.resolveInst(extra.ptr); @@ -9095,7 +9149,7 @@ pub const FuncGen = struct { self.sync_scope, toLlvmAtomicOrdering(extra.successOrder()), toLlvmAtomicOrdering(extra.failureOrder()), - ptr_ty.ptrAlignment(mod).toLlvm(), + ptr_ty.ptrAlignment(pt).toLlvm(), "", ); @@ -9118,7 +9172,8 @@ pub const FuncGen = struct { fn airAtomicRmw(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const extra = self.air.extraData(Air.AtomicRmw, pl_op.payload).data; const ptr = try 
self.resolveInst(pl_op.operand); @@ -9134,7 +9189,7 @@ pub const FuncGen = struct { const access_kind: Builder.MemoryAccessKind = if (ptr_ty.isVolatilePtr(mod)) .@"volatile" else .normal; - const ptr_alignment = ptr_ty.ptrAlignment(mod).toLlvm(); + const ptr_alignment = ptr_ty.ptrAlignment(pt).toLlvm(); if (llvm_abi_ty != .none) { // operand needs widening and truncating or bitcasting. @@ -9181,19 +9236,20 @@ pub const FuncGen = struct { fn airAtomicLoad(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const atomic_load = self.air.instructions.items(.data)[@intFromEnum(inst)].atomic_load; const ptr = try self.resolveInst(atomic_load.ptr); const ptr_ty = self.typeOf(atomic_load.ptr); const info = ptr_ty.ptrInfo(mod); const elem_ty = Type.fromInterned(info.child); - if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return .none; + if (!elem_ty.hasRuntimeBitsIgnoreComptime(pt)) return .none; const ordering = toLlvmAtomicOrdering(atomic_load.order); const llvm_abi_ty = try o.getAtomicAbiType(elem_ty, false); const ptr_alignment = (if (info.flags.alignment != .none) @as(InternPool.Alignment, info.flags.alignment) else - Type.fromInterned(info.child).abiAlignment(mod)).toLlvm(); + Type.fromInterned(info.child).abiAlignment(pt)).toLlvm(); const access_kind: Builder.MemoryAccessKind = if (info.flags.is_volatile) .@"volatile" else .normal; const elem_llvm_ty = try o.lowerType(elem_ty); @@ -9228,11 +9284,12 @@ pub const FuncGen = struct { ordering: Builder.AtomicOrdering, ) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const ptr_ty = self.typeOf(bin_op.lhs); const operand_ty = ptr_ty.childType(mod); - if (!operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return .none; + if (!operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) return .none; const ptr = try self.resolveInst(bin_op.lhs); var element = try self.resolveInst(bin_op.rhs); const llvm_abi_ty = try o.getAtomicAbiType(operand_ty, false); @@ -9252,12 +9309,13 @@ pub const FuncGen = struct { fn airMemset(self: *FuncGen, inst: Air.Inst.Index, safety: bool) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const dest_slice = try self.resolveInst(bin_op.lhs); const ptr_ty = self.typeOf(bin_op.lhs); const elem_ty = self.typeOf(bin_op.rhs); - const dest_ptr_align = ptr_ty.ptrAlignment(mod).toLlvm(); + const dest_ptr_align = ptr_ty.ptrAlignment(pt).toLlvm(); const dest_ptr = try self.sliceOrArrayPtr(dest_slice, ptr_ty); const access_kind: Builder.MemoryAccessKind = if (ptr_ty.isVolatilePtr(mod)) .@"volatile" else .normal; @@ -9270,7 +9328,7 @@ pub const FuncGen = struct { ptr_ty.isSlice(mod) and std.Target.wasm.featureSetHas(o.target.cpu.features, .bulk_memory); - if (try self.air.value(bin_op.rhs, mod)) |elem_val| { + if (try self.air.value(bin_op.rhs, pt)) |elem_val| { if (elem_val.isUndefDeep(mod)) { // Even if safety is disabled, we still emit a memset to undefined since it conveys // extra information to LLVM. However, safety makes the difference between using @@ -9296,7 +9354,7 @@ pub const FuncGen = struct { // repeating byte pattern, for example, `@as(u64, 0)` has a // repeating byte pattern of 0 bytes. In such case, the memset // intrinsic can be used. 
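         // Editor's sketch (hedged; `repeatedByte` is a hypothetical helper — the
         // real check is `Value.hasRepeatedByteRepr` just below): conceptually the
         // comptime-known value is serialized to its ABI byte representation and
         // tested for a single repeating byte.
         //   fn repeatedByte(bytes: []const u8) ?u8 {
         //       if (bytes.len == 0) return null;
         //       for (bytes[1..]) |b| if (b != bytes[0]) return null;
         //       return bytes[0];
         //   }
         // For example, `@as(u64, 0)` serializes to eight 0x00 bytes, so a plain
         // `llvm.memset` of 0 suffices.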
- if (try elem_val.hasRepeatedByteRepr(elem_ty, mod)) |byte_val| { + if (try elem_val.hasRepeatedByteRepr(elem_ty, pt)) |byte_val| { const fill_byte = try o.builder.intValue(.i8, byte_val); const len = try self.sliceOrArrayLenInBytes(dest_slice, ptr_ty); if (intrinsic_len0_traps) { @@ -9309,7 +9367,7 @@ pub const FuncGen = struct { } const value = try self.resolveInst(bin_op.rhs); - const elem_abi_size = elem_ty.abiSize(mod); + const elem_abi_size = elem_ty.abiSize(pt); if (elem_abi_size == 1) { // In this case we can take advantage of LLVM's intrinsic. @@ -9361,9 +9419,9 @@ pub const FuncGen = struct { _ = try self.wip.brCond(end, body_block, end_block); self.wip.cursor = .{ .block = body_block }; - const elem_abi_align = elem_ty.abiAlignment(mod); + const elem_abi_align = elem_ty.abiAlignment(pt); const it_ptr_align = InternPool.Alignment.fromLlvm(dest_ptr_align).min(elem_abi_align).toLlvm(); - if (isByRef(elem_ty, mod)) { + if (isByRef(elem_ty, pt)) { _ = try self.wip.callMemCpy( it_ptr.toValue(), it_ptr_align, @@ -9405,7 +9463,8 @@ pub const FuncGen = struct { fn airMemcpy(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const dest_slice = try self.resolveInst(bin_op.lhs); const dest_ptr_ty = self.typeOf(bin_op.lhs); @@ -9434,9 +9493,9 @@ pub const FuncGen = struct { self.wip.cursor = .{ .block = memcpy_block }; _ = try self.wip.callMemCpy( dest_ptr, - dest_ptr_ty.ptrAlignment(mod).toLlvm(), + dest_ptr_ty.ptrAlignment(pt).toLlvm(), src_ptr, - src_ptr_ty.ptrAlignment(mod).toLlvm(), + src_ptr_ty.ptrAlignment(pt).toLlvm(), len, access_kind, ); @@ -9447,9 +9506,9 @@ pub const FuncGen = struct { _ = try self.wip.callMemCpy( dest_ptr, - dest_ptr_ty.ptrAlignment(mod).toLlvm(), + dest_ptr_ty.ptrAlignment(pt).toLlvm(), src_ptr, - src_ptr_ty.ptrAlignment(mod).toLlvm(), + src_ptr_ty.ptrAlignment(pt).toLlvm(), len, access_kind, ); @@ -9458,10 +9517,11 @@ pub const FuncGen = struct { fn airSetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const un_ty = self.typeOf(bin_op.lhs).childType(mod); - const layout = un_ty.unionGetLayout(mod); + const layout = un_ty.unionGetLayout(pt); if (layout.tag_size == 0) return .none; const union_ptr = try self.resolveInst(bin_op.lhs); const new_tag = try self.resolveInst(bin_op.rhs); @@ -9479,13 +9539,13 @@ pub const FuncGen = struct { fn airGetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const un_ty = self.typeOf(ty_op.operand); - const layout = un_ty.unionGetLayout(mod); + const layout = un_ty.unionGetLayout(pt); if (layout.tag_size == 0) return .none; const union_handle = try self.resolveInst(ty_op.operand); - if (isByRef(un_ty, mod)) { + if (isByRef(un_ty, pt)) { const llvm_un_ty = try o.lowerType(un_ty); if (layout.payload_size == 0) return self.wip.load(.normal, llvm_un_ty, union_handle, .default, ""); @@ -9554,7 +9614,7 @@ pub const FuncGen = struct { fn airByteSwap(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const ty_op = 
self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand_ty = self.typeOf(ty_op.operand); var bits = operand_ty.intInfo(mod).bits; @@ -9588,7 +9648,7 @@ pub const FuncGen = struct { fn airErrorSetHasValue(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const ip = &mod.intern_pool; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try self.resolveInst(ty_op.operand); @@ -9638,7 +9698,8 @@ pub const FuncGen = struct { fn getIsNamedEnumValueFunction(self: *FuncGen, enum_ty: Type) !Builder.Function.Index { const o = self.dg.object; - const zcu = o.module; + const pt = o.pt; + const zcu = pt.zcu; const enum_type = zcu.intern_pool.loadEnumType(enum_ty.toIntern()); // TODO: detect when the type changes and re-emit this function. @@ -9678,7 +9739,7 @@ pub const FuncGen = struct { for (0..enum_type.names.len) |field_index| { const this_tag_int_value = try o.lowerValue( - (try zcu.enumValueFieldIndex(enum_ty, @intCast(field_index))).toIntern(), + (try pt.enumValueFieldIndex(enum_ty, @intCast(field_index))).toIntern(), ); try wip_switch.addCase(this_tag_int_value, named_block, &wip); } @@ -9745,7 +9806,8 @@ pub const FuncGen = struct { fn airShuffle(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.Shuffle, ty_pl.payload).data; const a = try self.resolveInst(extra.a); @@ -9763,11 +9825,11 @@ pub const FuncGen = struct { defer self.gpa.free(values); for (values, 0..) |*val, i| { - const elem = try mask.elemValue(mod, i); + const elem = try mask.elemValue(pt, i); if (elem.isUndef(mod)) { val.* = try o.builder.undefConst(.i32); } else { - const int = elem.toSignedInt(mod); + const int = elem.toSignedInt(pt); const unsigned: u32 = @intCast(if (int >= 0) int else ~int + a_len); val.* = try o.builder.intConst(.i32, unsigned); } @@ -9854,7 +9916,7 @@ pub const FuncGen = struct { fn airReduce(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const target = mod.getTarget(); const reduce = self.air.instructions.items(.data)[@intFromEnum(inst)].reduce; @@ -9964,7 +10026,8 @@ pub const FuncGen = struct { fn airAggregateInit(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const result_ty = self.typeOfIndex(inst); @@ -9986,16 +10049,16 @@ pub const FuncGen = struct { if (mod.typeToPackedStruct(result_ty)) |struct_type| { const backing_int_ty = struct_type.backingIntType(ip).*; assert(backing_int_ty != .none); - const big_bits = Type.fromInterned(backing_int_ty).bitSize(mod); + const big_bits = Type.fromInterned(backing_int_ty).bitSize(pt); const int_ty = try o.builder.intType(@intCast(big_bits)); comptime assert(Type.packed_struct_layout_version == 2); var running_int = try o.builder.intValue(int_ty, 0); var running_bits: u16 = 0; for (elements, struct_type.field_types.get(ip)) |elem, field_ty| { - if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(mod)) continue; + if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(pt)) continue; const 
non_int_val = try self.resolveInst(elem); - const ty_bit_size: u16 = @intCast(Type.fromInterned(field_ty).bitSize(mod)); + const ty_bit_size: u16 = @intCast(Type.fromInterned(field_ty).bitSize(pt)); const small_int_ty = try o.builder.intType(ty_bit_size); const small_int_val = if (Type.fromInterned(field_ty).isPtrAtRuntime(mod)) try self.wip.cast(.ptrtoint, non_int_val, small_int_ty, "") @@ -10013,23 +10076,23 @@ pub const FuncGen = struct { assert(result_ty.containerLayout(mod) != .@"packed"); - if (isByRef(result_ty, mod)) { + if (isByRef(result_ty, pt)) { // TODO in debug builds init to undef so that the padding will be 0xaa // even if we fully populate the fields. - const alignment = result_ty.abiAlignment(mod).toLlvm(); + const alignment = result_ty.abiAlignment(pt).toLlvm(); const alloca_inst = try self.buildAllocaWorkaround(result_ty, alignment); for (elements, 0..) |elem, i| { - if ((try result_ty.structFieldValueComptime(mod, i)) != null) continue; + if ((try result_ty.structFieldValueComptime(pt, i)) != null) continue; const llvm_elem = try self.resolveInst(elem); const llvm_i = o.llvmFieldIndex(result_ty, i).?; const field_ptr = try self.wip.gepStruct(llvm_result_ty, alloca_inst, llvm_i, ""); - const field_ptr_ty = try mod.ptrType(.{ + const field_ptr_ty = try pt.ptrType(.{ .child = self.typeOf(elem).toIntern(), .flags = .{ - .alignment = result_ty.structFieldAlign(i, mod), + .alignment = result_ty.structFieldAlign(i, pt), }, }); try self.store(field_ptr, field_ptr_ty, llvm_elem, .none); @@ -10039,7 +10102,7 @@ pub const FuncGen = struct { } else { var result = try o.builder.poisonValue(llvm_result_ty); for (elements, 0..) |elem, i| { - if ((try result_ty.structFieldValueComptime(mod, i)) != null) continue; + if ((try result_ty.structFieldValueComptime(pt, i)) != null) continue; const llvm_elem = try self.resolveInst(elem); const llvm_i = o.llvmFieldIndex(result_ty, i).?; @@ -10049,15 +10112,15 @@ pub const FuncGen = struct { } }, .Array => { - assert(isByRef(result_ty, mod)); + assert(isByRef(result_ty, pt)); const llvm_usize = try o.lowerType(Type.usize); const usize_zero = try o.builder.intValue(llvm_usize, 0); - const alignment = result_ty.abiAlignment(mod).toLlvm(); + const alignment = result_ty.abiAlignment(pt).toLlvm(); const alloca_inst = try self.buildAllocaWorkaround(result_ty, alignment); const array_info = result_ty.arrayInfo(mod); - const elem_ptr_ty = try mod.ptrType(.{ + const elem_ptr_ty = try pt.ptrType(.{ .child = array_info.elem_type.toIntern(), }); @@ -10084,21 +10147,22 @@ pub const FuncGen = struct { fn airUnionInit(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.UnionInit, ty_pl.payload).data; const union_ty = self.typeOfIndex(inst); const union_llvm_ty = try o.lowerType(union_ty); - const layout = union_ty.unionGetLayout(mod); + const layout = union_ty.unionGetLayout(pt); const union_obj = mod.typeToUnion(union_ty).?; if (union_obj.getLayout(ip) == .@"packed") { - const big_bits = union_ty.bitSize(mod); + const big_bits = union_ty.bitSize(pt); const int_llvm_ty = try o.builder.intType(@intCast(big_bits)); const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[extra.field_index]); const non_int_val = try self.resolveInst(extra.init); - const small_int_ty = try o.builder.intType(@intCast(field_ty.bitSize(mod))); 
+ const small_int_ty = try o.builder.intType(@intCast(field_ty.bitSize(pt))); const small_int_val = if (field_ty.isPtrAtRuntime(mod)) try self.wip.cast(.ptrtoint, non_int_val, small_int_ty, "") else @@ -10110,19 +10174,19 @@ pub const FuncGen = struct { const tag_ty = union_ty.unionTagTypeHypothetical(mod); const union_field_name = union_obj.loadTagType(ip).names.get(ip)[extra.field_index]; const enum_field_index = tag_ty.enumFieldIndex(union_field_name, mod).?; - const tag_val = try mod.enumValueFieldIndex(tag_ty, enum_field_index); - break :blk try tag_val.intFromEnum(tag_ty, mod); + const tag_val = try pt.enumValueFieldIndex(tag_ty, enum_field_index); + break :blk try tag_val.intFromEnum(tag_ty, pt); }; if (layout.payload_size == 0) { if (layout.tag_size == 0) { return .none; } - assert(!isByRef(union_ty, mod)); + assert(!isByRef(union_ty, pt)); var big_int_space: Value.BigIntSpace = undefined; - const tag_big_int = tag_int_val.toBigInt(&big_int_space, mod); + const tag_big_int = tag_int_val.toBigInt(&big_int_space, pt); return try o.builder.bigIntValue(union_llvm_ty, tag_big_int); } - assert(isByRef(union_ty, mod)); + assert(isByRef(union_ty, pt)); // The llvm type of the alloca will be the named LLVM union type, and will not // necessarily match the format that we need, depending on which tag is active. // We must construct the correct unnamed struct type here, in order to then set @@ -10132,14 +10196,14 @@ pub const FuncGen = struct { const llvm_payload = try self.resolveInst(extra.init); const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[extra.field_index]); const field_llvm_ty = try o.lowerType(field_ty); - const field_size = field_ty.abiSize(mod); - const field_align = mod.unionFieldNormalAlignment(union_obj, extra.field_index); + const field_size = field_ty.abiSize(pt); + const field_align = pt.unionFieldNormalAlignment(union_obj, extra.field_index); const llvm_usize = try o.lowerType(Type.usize); const usize_zero = try o.builder.intValue(llvm_usize, 0); const llvm_union_ty = t: { const payload_ty = p: { - if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) { const padding_len = layout.payload_size; break :p try o.builder.arrayType(padding_len, .i8); } @@ -10169,7 +10233,7 @@ pub const FuncGen = struct { // Now we follow the layout as expressed above with GEP instructions to set the // tag and the payload. - const field_ptr_ty = try mod.ptrType(.{ + const field_ptr_ty = try pt.ptrType(.{ .child = field_ty.toIntern(), .flags = .{ .alignment = field_align }, }); @@ -10195,9 +10259,9 @@ pub const FuncGen = struct { const field_ptr = try self.wip.gep(.inbounds, llvm_union_ty, result_ptr, &indices, ""); const tag_ty = try o.lowerType(Type.fromInterned(union_obj.enum_tag_ty)); var big_int_space: Value.BigIntSpace = undefined; - const tag_big_int = tag_int_val.toBigInt(&big_int_space, mod); + const tag_big_int = tag_int_val.toBigInt(&big_int_space, pt); const llvm_tag = try o.builder.bigIntValue(tag_ty, tag_big_int); - const tag_alignment = Type.fromInterned(union_obj.enum_tag_ty).abiAlignment(mod).toLlvm(); + const tag_alignment = Type.fromInterned(union_obj.enum_tag_ty).abiAlignment(pt).toLlvm(); _ = try self.wip.store(.normal, llvm_tag, field_ptr, tag_alignment); } @@ -10223,7 +10287,7 @@ pub const FuncGen = struct { // by the target. // To work around this, don't emit llvm.prefetch in this case. 
// See https://bugs.llvm.org/show_bug.cgi?id=21037 - const mod = o.module; + const mod = o.pt.zcu; const target = mod.getTarget(); switch (prefetch.cache) { .instruction => switch (target.cpu.arch) { @@ -10279,7 +10343,7 @@ pub const FuncGen = struct { fn airWorkItemId(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const target = o.module.getTarget(); + const target = o.pt.zcu.getTarget(); assert(target.cpu.arch == .amdgcn); // TODO is to port this function to other GPU architectures const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; @@ -10289,7 +10353,7 @@ pub const FuncGen = struct { fn airWorkGroupSize(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const target = o.module.getTarget(); + const target = o.pt.zcu.getTarget(); assert(target.cpu.arch == .amdgcn); // TODO is to port this function to other GPU architectures const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; @@ -10312,7 +10376,7 @@ pub const FuncGen = struct { fn airWorkGroupId(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const target = o.module.getTarget(); + const target = o.pt.zcu.getTarget(); assert(target.cpu.arch == .amdgcn); // TODO is to port this function to other GPU architectures const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; @@ -10322,7 +10386,7 @@ pub const FuncGen = struct { fn getErrorNameTable(self: *FuncGen) Allocator.Error!Builder.Variable.Index { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; const table = o.error_name_table; if (table != .none) return table; @@ -10334,7 +10398,7 @@ pub const FuncGen = struct { variable_index.setMutability(.constant, &o.builder); variable_index.setUnnamedAddr(.unnamed_addr, &o.builder); variable_index.setAlignment( - Type.slice_const_u8_sentinel_0.abiAlignment(mod).toLlvm(), + Type.slice_const_u8_sentinel_0.abiAlignment(pt).toLlvm(), &o.builder, ); @@ -10372,15 +10436,16 @@ pub const FuncGen = struct { can_elide_load: bool, ) !Builder.Value { const o = fg.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const payload_ty = opt_ty.optionalChild(mod); - if (isByRef(opt_ty, mod)) { + if (isByRef(opt_ty, pt)) { // We have a pointer and we need to return a pointer to the first field. 
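         // Editor's note (derived from airWrapOptional earlier in this file): a
         // by-ref optional lowers to the struct { payload, i8 }, so field index 0
         // below addresses the payload and field index 1 holds the non-null flag.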
const payload_ptr = try fg.wip.gepStruct(opt_llvm_ty, opt_handle, 0, ""); - const payload_alignment = payload_ty.abiAlignment(mod).toLlvm(); - if (isByRef(payload_ty, mod)) { + const payload_alignment = payload_ty.abiAlignment(pt).toLlvm(); + if (isByRef(payload_ty, pt)) { if (can_elide_load) return payload_ptr; @@ -10389,7 +10454,7 @@ pub const FuncGen = struct { return fg.loadTruncate(.normal, payload_ty, payload_ptr, payload_alignment); } - assert(!isByRef(payload_ty, mod)); + assert(!isByRef(payload_ty, pt)); return fg.wip.extractValue(opt_handle, &.{0}, ""); } @@ -10400,12 +10465,12 @@ pub const FuncGen = struct { non_null_bit: Builder.Value, ) !Builder.Value { const o = self.dg.object; + const pt = o.pt; const optional_llvm_ty = try o.lowerType(optional_ty); const non_null_field = try self.wip.cast(.zext, non_null_bit, .i8, ""); - const mod = o.module; - if (isByRef(optional_ty, mod)) { - const payload_alignment = optional_ty.abiAlignment(mod).toLlvm(); + if (isByRef(optional_ty, pt)) { + const payload_alignment = optional_ty.abiAlignment(pt).toLlvm(); const alloca_inst = try self.buildAllocaWorkaround(optional_ty, payload_alignment); { @@ -10432,7 +10497,8 @@ pub const FuncGen = struct { field_index: u32, ) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const struct_ty = struct_ptr_ty.childType(mod); switch (struct_ty.zigTypeTag(mod)) { .Struct => switch (struct_ty.containerLayout(mod)) { @@ -10452,7 +10518,7 @@ pub const FuncGen = struct { // We have a pointer to a packed struct field that happens to be byte-aligned. // Offset our operand pointer by the correct number of bytes. - const byte_offset = @divExact(mod.structPackedFieldBitOffset(struct_type, field_index) + struct_ptr_ty_info.packed_offset.bit_offset, 8); + const byte_offset = @divExact(pt.structPackedFieldBitOffset(struct_type, field_index) + struct_ptr_ty_info.packed_offset.bit_offset, 8); if (byte_offset == 0) return struct_ptr; const usize_ty = try o.lowerType(Type.usize); const llvm_index = try o.builder.intValue(usize_ty, byte_offset); @@ -10470,14 +10536,14 @@ pub const FuncGen = struct { // the struct. const llvm_index = try o.builder.intValue( try o.lowerType(Type.usize), - @intFromBool(struct_ty.hasRuntimeBitsIgnoreComptime(mod)), + @intFromBool(struct_ty.hasRuntimeBitsIgnoreComptime(pt)), ); return self.wip.gep(.inbounds, struct_llvm_ty, struct_ptr, &.{llvm_index}, ""); } }, }, .Union => { - const layout = struct_ty.unionGetLayout(mod); + const layout = struct_ty.unionGetLayout(pt); if (layout.payload_size == 0 or struct_ty.containerLayout(mod) == .@"packed") return struct_ptr; const payload_index = @intFromBool(layout.tag_align.compare(.gte, layout.payload_align)); const union_llvm_ty = try o.lowerType(struct_ty); @@ -10500,9 +10566,10 @@ pub const FuncGen = struct { // => so load the byte aligned value and trunc the unwanted bits. 
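     // Editor's worked example (hedged): a `u24` has a 4-byte ABI size, so the
     // load below reads an i32 and truncates to i24; on big-endian targets the
     // payload occupies the high bytes, so it is first shifted right by
     // (abi_size - ceil(bit_size / 8)) * 8 = (4 - 3) * 8 = 8 bits.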
const o = fg.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const payload_llvm_ty = try o.lowerType(payload_ty); - const abi_size = payload_ty.abiSize(mod); + const abi_size = payload_ty.abiSize(pt); // llvm bug workarounds: const workaround_explicit_mask = o.target.cpu.arch == .powerpc and abi_size >= 4; @@ -10522,7 +10589,7 @@ pub const FuncGen = struct { const shifted = if (payload_llvm_ty != load_llvm_ty and o.target.cpu.arch.endian() == .big) try fg.wip.bin(.lshr, loaded, try o.builder.intValue( load_llvm_ty, - (payload_ty.abiSize(mod) - (std.math.divCeil(u64, payload_ty.bitSize(mod), 8) catch unreachable)) * 8, + (payload_ty.abiSize(pt) - (std.math.divCeil(u64, payload_ty.bitSize(pt), 8) catch unreachable)) * 8, ), "") else loaded; @@ -10546,11 +10613,11 @@ pub const FuncGen = struct { access_kind: Builder.MemoryAccessKind, ) !Builder.Value { const o = fg.dg.object; - const mod = o.module; + const pt = o.pt; //const pointee_llvm_ty = try o.lowerType(pointee_type); - const result_align = InternPool.Alignment.fromLlvm(ptr_alignment).max(pointee_type.abiAlignment(mod)).toLlvm(); + const result_align = InternPool.Alignment.fromLlvm(ptr_alignment).max(pointee_type.abiAlignment(pt)).toLlvm(); const result_ptr = try fg.buildAllocaWorkaround(pointee_type, result_align); - const size_bytes = pointee_type.abiSize(mod); + const size_bytes = pointee_type.abiSize(pt); _ = try fg.wip.callMemCpy( result_ptr, result_align, @@ -10567,15 +10634,16 @@ pub const FuncGen = struct { /// For isByRef=false types, it creates a load instruction and returns it. fn load(self: *FuncGen, ptr: Builder.Value, ptr_ty: Type) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const info = ptr_ty.ptrInfo(mod); const elem_ty = Type.fromInterned(info.child); - if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return .none; + if (!elem_ty.hasRuntimeBitsIgnoreComptime(pt)) return .none; const ptr_alignment = (if (info.flags.alignment != .none) @as(InternPool.Alignment, info.flags.alignment) else - elem_ty.abiAlignment(mod)).toLlvm(); + elem_ty.abiAlignment(pt)).toLlvm(); const access_kind: Builder.MemoryAccessKind = if (info.flags.is_volatile) .@"volatile" else .normal; @@ -10591,7 +10659,7 @@ pub const FuncGen = struct { } if (info.packed_offset.host_size == 0) { - if (isByRef(elem_ty, mod)) { + if (isByRef(elem_ty, pt)) { return self.loadByRef(ptr, elem_ty, ptr_alignment, access_kind); } return self.loadTruncate(access_kind, elem_ty, ptr, ptr_alignment); @@ -10601,13 +10669,13 @@ pub const FuncGen = struct { const containing_int = try self.wip.load(access_kind, containing_int_ty, ptr, ptr_alignment, ""); - const elem_bits = ptr_ty.childType(mod).bitSize(mod); + const elem_bits = ptr_ty.childType(mod).bitSize(pt); const shift_amt = try o.builder.intValue(containing_int_ty, info.packed_offset.bit_offset); const shifted_value = try self.wip.bin(.lshr, containing_int, shift_amt, ""); const elem_llvm_ty = try o.lowerType(elem_ty); - if (isByRef(elem_ty, mod)) { - const result_align = elem_ty.abiAlignment(mod).toLlvm(); + if (isByRef(elem_ty, pt)) { + const result_align = elem_ty.abiAlignment(pt).toLlvm(); const result_ptr = try self.buildAllocaWorkaround(elem_ty, result_align); const same_size_int = try o.builder.intType(@intCast(elem_bits)); @@ -10639,13 +10707,14 @@ pub const FuncGen = struct { ordering: Builder.AtomicOrdering, ) !void { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const info = 
ptr_ty.ptrInfo(mod); const elem_ty = Type.fromInterned(info.child); - if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) { + if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) { return; } - const ptr_alignment = ptr_ty.ptrAlignment(mod).toLlvm(); + const ptr_alignment = ptr_ty.ptrAlignment(pt).toLlvm(); const access_kind: Builder.MemoryAccessKind = if (info.flags.is_volatile) .@"volatile" else .normal; @@ -10669,7 +10738,7 @@ pub const FuncGen = struct { assert(ordering == .none); const containing_int = try self.wip.load(access_kind, containing_int_ty, ptr, ptr_alignment, ""); - const elem_bits = ptr_ty.childType(mod).bitSize(mod); + const elem_bits = ptr_ty.childType(mod).bitSize(pt); const shift_amt = try o.builder.intConst(containing_int_ty, info.packed_offset.bit_offset); // Convert to equally-sized integer type in order to perform the bit // operations on the value to store @@ -10704,7 +10773,7 @@ pub const FuncGen = struct { _ = try self.wip.store(access_kind, ored_value, ptr, ptr_alignment); return; } - if (!isByRef(elem_ty, mod)) { + if (!isByRef(elem_ty, pt)) { _ = try self.wip.storeAtomic( access_kind, elem, @@ -10720,8 +10789,8 @@ pub const FuncGen = struct { ptr, ptr_alignment, elem, - elem_ty.abiAlignment(mod).toLlvm(), - try o.builder.intValue(try o.lowerType(Type.usize), elem_ty.abiSize(mod)), + elem_ty.abiAlignment(pt).toLlvm(), + try o.builder.intValue(try o.lowerType(Type.usize), elem_ty.abiSize(pt)), access_kind, ); } @@ -10747,12 +10816,13 @@ pub const FuncGen = struct { a5: Builder.Value, ) Allocator.Error!Builder.Value { const o = fg.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const target = mod.getTarget(); if (!target_util.hasValgrindSupport(target)) return default_value; const llvm_usize = try o.lowerType(Type.usize); - const usize_alignment = Type.usize.abiAlignment(mod).toLlvm(); + const usize_alignment = Type.usize.abiAlignment(pt).toLlvm(); const array_llvm_ty = try o.builder.arrayType(6, llvm_usize); const array_ptr = if (fg.valgrind_client_request_array == .none) a: { @@ -10813,13 +10883,13 @@ pub const FuncGen = struct { fn typeOf(fg: *FuncGen, inst: Air.Inst.Ref) Type { const o = fg.dg.object; - const mod = o.module; + const mod = o.pt.zcu; return fg.air.typeOf(inst, &mod.intern_pool); } fn typeOfIndex(fg: *FuncGen, inst: Air.Inst.Index) Type { const o = fg.dg.object; - const mod = o.module; + const mod = o.pt.zcu; return fg.air.typeOfIndex(inst, &mod.intern_pool); } }; @@ -10990,12 +11060,12 @@ fn toLlvmGlobalAddressSpace(wanted_address_space: std.builtin.AddressSpace, targ }; } -fn returnTypeByRef(zcu: *Zcu, target: std.Target, ty: Type) bool { - if (isByRef(ty, zcu)) { +fn returnTypeByRef(pt: Zcu.PerThread, target: std.Target, ty: Type) bool { + if (isByRef(ty, pt)) { return true; } else if (target.cpu.arch.isX86() and !std.Target.x86.featureSetHas(target.cpu.features, .evex512) and - ty.totalVectorBits(zcu) >= 512) + ty.totalVectorBits(pt) >= 512) { // As of LLVM 18, passing a vector byval with fastcc that is 512 bits or more returns // "512-bit vector arguments require 'evex512' for AVX512" @@ -11005,38 +11075,38 @@ fn returnTypeByRef(zcu: *Zcu, target: std.Target, ty: Type) bool { } } -fn firstParamSRet(fn_info: InternPool.Key.FuncType, zcu: *Zcu, target: std.Target) bool { +fn firstParamSRet(fn_info: InternPool.Key.FuncType, pt: Zcu.PerThread, target: std.Target) bool { const return_type = Type.fromInterned(fn_info.return_type); - if (!return_type.hasRuntimeBitsIgnoreComptime(zcu)) return false; + if 
(!return_type.hasRuntimeBitsIgnoreComptime(pt)) return false; return switch (fn_info.cc) { - .Unspecified, .Inline => returnTypeByRef(zcu, target, return_type), + .Unspecified, .Inline => returnTypeByRef(pt, target, return_type), .C => switch (target.cpu.arch) { .mips, .mipsel => false, - .x86 => isByRef(return_type, zcu), + .x86 => isByRef(return_type, pt), .x86_64 => switch (target.os.tag) { - .windows => x86_64_abi.classifyWindows(return_type, zcu) == .memory, - else => firstParamSRetSystemV(return_type, zcu, target), + .windows => x86_64_abi.classifyWindows(return_type, pt) == .memory, + else => firstParamSRetSystemV(return_type, pt, target), }, - .wasm32 => wasm_c_abi.classifyType(return_type, zcu)[0] == .indirect, - .aarch64, .aarch64_be => aarch64_c_abi.classifyType(return_type, zcu) == .memory, - .arm, .armeb => switch (arm_c_abi.classifyType(return_type, zcu, .ret)) { + .wasm32 => wasm_c_abi.classifyType(return_type, pt)[0] == .indirect, + .aarch64, .aarch64_be => aarch64_c_abi.classifyType(return_type, pt) == .memory, + .arm, .armeb => switch (arm_c_abi.classifyType(return_type, pt, .ret)) { .memory, .i64_array => true, .i32_array => |size| size != 1, .byval => false, }, - .riscv32, .riscv64 => riscv_c_abi.classifyType(return_type, zcu) == .memory, + .riscv32, .riscv64 => riscv_c_abi.classifyType(return_type, pt) == .memory, else => false, // TODO investigate C ABI for other architectures }, - .SysV => firstParamSRetSystemV(return_type, zcu, target), - .Win64 => x86_64_abi.classifyWindows(return_type, zcu) == .memory, - .Stdcall => !isScalar(zcu, return_type), + .SysV => firstParamSRetSystemV(return_type, pt, target), + .Win64 => x86_64_abi.classifyWindows(return_type, pt) == .memory, + .Stdcall => !isScalar(pt.zcu, return_type), else => false, }; } -fn firstParamSRetSystemV(ty: Type, zcu: *Zcu, target: std.Target) bool { - const class = x86_64_abi.classifySystemV(ty, zcu, target, .ret); +fn firstParamSRetSystemV(ty: Type, pt: Zcu.PerThread, target: std.Target) bool { + const class = x86_64_abi.classifySystemV(ty, pt, target, .ret); if (class[0] == .memory) return true; if (class[0] == .x87 and class[2] != .none) return true; return false; @@ -11046,9 +11116,10 @@ fn firstParamSRetSystemV(ty: Type, zcu: *Zcu, target: std.Target) bool { /// completely differently in the function prototype to honor the C ABI, and then /// be effectively bitcasted to the actual return type. fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Builder.Type { - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const return_type = Type.fromInterned(fn_info.return_type); - if (!return_type.hasRuntimeBitsIgnoreComptime(mod)) { + if (!return_type.hasRuntimeBitsIgnoreComptime(pt)) { // If the return type is an error set or an error union, then we make this // anyerror return type instead, so that it can be coerced into a function // pointer type which has anyerror as the return type. 
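As an aside, a minimal sketch of the zero-bit-return rule stated in the comment above (editor's illustration only; `isErrorLike` is a hypothetical stand-in for the real return-type inspection):

    fn zeroBitReturnType(o: *Object, return_type: Type) Allocator.Error!Builder.Type {
        // Error sets and error unions with zero-bit payloads still lower to the
        // error integer, so the resulting function type can coerce to one whose
        // return type is `anyerror`.
        if (isErrorLike(return_type)) return try o.errorIntType();
        return .void; // every other zero-bit return simply disappears
    }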
@@ -11058,12 +11129,12 @@ fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Bu switch (fn_info.cc) { .Unspecified, .Inline, - => return if (returnTypeByRef(mod, target, return_type)) .void else o.lowerType(return_type), + => return if (returnTypeByRef(pt, target, return_type)) .void else o.lowerType(return_type), .C => { switch (target.cpu.arch) { .mips, .mipsel => return o.lowerType(return_type), - .x86 => return if (isByRef(return_type, mod)) .void else o.lowerType(return_type), + .x86 => return if (isByRef(return_type, pt)) .void else o.lowerType(return_type), .x86_64 => switch (target.os.tag) { .windows => return lowerWin64FnRetTy(o, fn_info), else => return lowerSystemVFnRetTy(o, fn_info), @@ -11072,36 +11143,36 @@ fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Bu if (isScalar(mod, return_type)) { return o.lowerType(return_type); } - const classes = wasm_c_abi.classifyType(return_type, mod); + const classes = wasm_c_abi.classifyType(return_type, pt); if (classes[0] == .indirect or classes[0] == .none) { return .void; } assert(classes[0] == .direct and classes[1] == .none); - const scalar_type = wasm_c_abi.scalarType(return_type, mod); - return o.builder.intType(@intCast(scalar_type.abiSize(mod) * 8)); + const scalar_type = wasm_c_abi.scalarType(return_type, pt); + return o.builder.intType(@intCast(scalar_type.abiSize(pt) * 8)); }, .aarch64, .aarch64_be => { - switch (aarch64_c_abi.classifyType(return_type, mod)) { + switch (aarch64_c_abi.classifyType(return_type, pt)) { .memory => return .void, .float_array => return o.lowerType(return_type), .byval => return o.lowerType(return_type), - .integer => return o.builder.intType(@intCast(return_type.bitSize(mod))), + .integer => return o.builder.intType(@intCast(return_type.bitSize(pt))), .double_integer => return o.builder.arrayType(2, .i64), } }, .arm, .armeb => { - switch (arm_c_abi.classifyType(return_type, mod, .ret)) { + switch (arm_c_abi.classifyType(return_type, pt, .ret)) { .memory, .i64_array => return .void, .i32_array => |len| return if (len == 1) .i32 else .void, .byval => return o.lowerType(return_type), } }, .riscv32, .riscv64 => { - switch (riscv_c_abi.classifyType(return_type, mod)) { + switch (riscv_c_abi.classifyType(return_type, pt)) { .memory => return .void, .integer => { - return o.builder.intType(@intCast(return_type.bitSize(mod))); + return o.builder.intType(@intCast(return_type.bitSize(pt))); }, .double_integer => { return o.builder.structType(.normal, &.{ .i64, .i64 }); @@ -11112,7 +11183,7 @@ fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Bu var types: [8]Builder.Type = undefined; for (0..return_type.structFieldCount(mod)) |field_index| { const field_ty = return_type.structFieldType(field_index, mod); - if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; types[types_len] = try o.lowerType(field_ty); types_len += 1; } @@ -11132,14 +11203,14 @@ fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Bu } fn lowerWin64FnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Builder.Type { - const mod = o.module; + const pt = o.pt; const return_type = Type.fromInterned(fn_info.return_type); - switch (x86_64_abi.classifyWindows(return_type, mod)) { + switch (x86_64_abi.classifyWindows(return_type, pt)) { .integer => { - if (isScalar(mod, return_type)) { + if (isScalar(pt.zcu, return_type)) { return o.lowerType(return_type); } 
else { - return o.builder.intType(@intCast(return_type.abiSize(mod) * 8)); + return o.builder.intType(@intCast(return_type.abiSize(pt) * 8)); } }, .win_i128 => return o.builder.vectorType(.normal, 2, .i64), @@ -11150,14 +11221,15 @@ fn lowerWin64FnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Err } fn lowerSystemVFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Builder.Type { - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const return_type = Type.fromInterned(fn_info.return_type); if (isScalar(mod, return_type)) { return o.lowerType(return_type); } const target = mod.getTarget(); - const classes = x86_64_abi.classifySystemV(return_type, mod, target, .ret); + const classes = x86_64_abi.classifySystemV(return_type, pt, target, .ret); if (classes[0] == .memory) return .void; var types_index: u32 = 0; var types_buffer: [8]Builder.Type = undefined; @@ -11249,8 +11321,7 @@ const ParamTypeIterator = struct { pub fn next(it: *ParamTypeIterator) Allocator.Error!?Lowering { if (it.zig_index >= it.fn_info.param_types.len) return null; - const zcu = it.object.module; - const ip = &zcu.intern_pool; + const ip = &it.object.pt.zcu.intern_pool; const ty = it.fn_info.param_types.get(ip)[it.zig_index]; it.byval_attr = false; return nextInner(it, Type.fromInterned(ty)); @@ -11258,8 +11329,7 @@ const ParamTypeIterator = struct { /// `airCall` uses this instead of `next` so that it can take into account variadic functions. pub fn nextCall(it: *ParamTypeIterator, fg: *FuncGen, args: []const Air.Inst.Ref) Allocator.Error!?Lowering { - const zcu = it.object.module; - const ip = &zcu.intern_pool; + const ip = &it.object.pt.zcu.intern_pool; if (it.zig_index >= it.fn_info.param_types.len) { if (it.zig_index >= args.len) { return null; @@ -11272,10 +11342,11 @@ const ParamTypeIterator = struct { } fn nextInner(it: *ParamTypeIterator, ty: Type) Allocator.Error!?Lowering { - const zcu = it.object.module; + const pt = it.object.pt; + const zcu = pt.zcu; const target = zcu.getTarget(); - if (!ty.hasRuntimeBitsIgnoreComptime(zcu)) { + if (!ty.hasRuntimeBitsIgnoreComptime(pt)) { it.zig_index += 1; return .no_bits; } @@ -11288,11 +11359,11 @@ const ParamTypeIterator = struct { { it.llvm_index += 1; return .slice; - } else if (isByRef(ty, zcu)) { + } else if (isByRef(ty, pt)) { return .byref; } else if (target.cpu.arch.isX86() and !std.Target.x86.featureSetHas(target.cpu.features, .evex512) and - ty.totalVectorBits(zcu) >= 512) + ty.totalVectorBits(pt) >= 512) { // As of LLVM 18, passing a vector byval with fastcc that is 512 bits or more returns // "512-bit vector arguments require 'evex512' for AVX512" @@ -11320,7 +11391,7 @@ const ParamTypeIterator = struct { if (isScalar(zcu, ty)) { return .byval; } - const classes = wasm_c_abi.classifyType(ty, zcu); + const classes = wasm_c_abi.classifyType(ty, pt); if (classes[0] == .indirect) { return .byref; } @@ -11329,7 +11400,7 @@ const ParamTypeIterator = struct { .aarch64, .aarch64_be => { it.zig_index += 1; it.llvm_index += 1; - switch (aarch64_c_abi.classifyType(ty, zcu)) { + switch (aarch64_c_abi.classifyType(ty, pt)) { .memory => return .byref_mut, .float_array => |len| return Lowering{ .float_array = len }, .byval => return .byval, @@ -11344,7 +11415,7 @@ const ParamTypeIterator = struct { .arm, .armeb => { it.zig_index += 1; it.llvm_index += 1; - switch (arm_c_abi.classifyType(ty, zcu, .arg)) { + switch (arm_c_abi.classifyType(ty, pt, .arg)) { .memory => { it.byval_attr = true; return 
.byref; @@ -11359,7 +11430,7 @@ const ParamTypeIterator = struct { it.llvm_index += 1; if (ty.toIntern() == .f16_type and !std.Target.riscv.featureSetHas(target.cpu.features, .d)) return .as_u16; - switch (riscv_c_abi.classifyType(ty, zcu)) { + switch (riscv_c_abi.classifyType(ty, pt)) { .memory => return .byref_mut, .byval => return .byval, .integer => return .abi_sized_int, @@ -11368,7 +11439,7 @@ const ParamTypeIterator = struct { it.types_len = 0; for (0..ty.structFieldCount(zcu)) |field_index| { const field_ty = ty.structFieldType(field_index, zcu); - if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; it.types_buffer[it.types_len] = try it.object.lowerType(field_ty); it.types_len += 1; } @@ -11406,10 +11477,10 @@ const ParamTypeIterator = struct { } fn nextWin64(it: *ParamTypeIterator, ty: Type) ?Lowering { - const zcu = it.object.module; - switch (x86_64_abi.classifyWindows(ty, zcu)) { + const pt = it.object.pt; + switch (x86_64_abi.classifyWindows(ty, pt)) { .integer => { - if (isScalar(zcu, ty)) { + if (isScalar(pt.zcu, ty)) { it.zig_index += 1; it.llvm_index += 1; return .byval; @@ -11439,17 +11510,17 @@ const ParamTypeIterator = struct { } fn nextSystemV(it: *ParamTypeIterator, ty: Type) Allocator.Error!?Lowering { - const zcu = it.object.module; - const ip = &zcu.intern_pool; - const target = zcu.getTarget(); - const classes = x86_64_abi.classifySystemV(ty, zcu, target, .arg); + const pt = it.object.pt; + const ip = &pt.zcu.intern_pool; + const target = pt.zcu.getTarget(); + const classes = x86_64_abi.classifySystemV(ty, pt, target, .arg); if (classes[0] == .memory) { it.zig_index += 1; it.llvm_index += 1; it.byval_attr = true; return .byref; } - if (isScalar(zcu, ty)) { + if (isScalar(pt.zcu, ty)) { it.zig_index += 1; it.llvm_index += 1; return .byval; @@ -11550,7 +11621,7 @@ fn iterateParamTypes(object: *Object, fn_info: InternPool.Key.FuncType) ParamTyp fn ccAbiPromoteInt( cc: std.builtin.CallingConvention, - mod: *Module, + mod: *Zcu, ty: Type, ) ?std.builtin.Signedness { const target = mod.getTarget(); @@ -11598,13 +11669,13 @@ fn ccAbiPromoteInt( /// This is the one source of truth for whether a type is passed around as an LLVM pointer, /// or as an LLVM value. -fn isByRef(ty: Type, mod: *Module) bool { +fn isByRef(ty: Type, pt: Zcu.PerThread) bool { // For tuples and structs, if there are more than this many non-void // fields, then we make it byref, otherwise byval. 
const max_fields_byval = 0; - const ip = &mod.intern_pool; + const ip = &pt.zcu.intern_pool; - switch (ty.zigTypeTag(mod)) { + switch (ty.zigTypeTag(pt.zcu)) { .Type, .ComptimeInt, .ComptimeFloat, @@ -11627,17 +11698,17 @@ fn isByRef(ty: Type, mod: *Module) bool { .AnyFrame, => return false, - .Array, .Frame => return ty.hasRuntimeBits(mod), + .Array, .Frame => return ty.hasRuntimeBits(pt), .Struct => { const struct_type = switch (ip.indexToKey(ty.toIntern())) { .anon_struct_type => |tuple| { var count: usize = 0; for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, field_val| { - if (field_val != .none or !Type.fromInterned(field_ty).hasRuntimeBits(mod)) continue; + if (field_val != .none or !Type.fromInterned(field_ty).hasRuntimeBits(pt)) continue; count += 1; if (count > max_fields_byval) return true; - if (isByRef(Type.fromInterned(field_ty), mod)) return true; + if (isByRef(Type.fromInterned(field_ty), pt)) return true; } return false; }, @@ -11655,27 +11726,27 @@ fn isByRef(ty: Type, mod: *Module) bool { count += 1; if (count > max_fields_byval) return true; const field_ty = Type.fromInterned(field_types[field_index]); - if (isByRef(field_ty, mod)) return true; + if (isByRef(field_ty, pt)) return true; } return false; }, - .Union => switch (ty.containerLayout(mod)) { + .Union => switch (ty.containerLayout(pt.zcu)) { .@"packed" => return false, - else => return ty.hasRuntimeBits(mod), + else => return ty.hasRuntimeBits(pt), }, .ErrorUnion => { - const payload_ty = ty.errorUnionPayload(mod); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + const payload_ty = ty.errorUnionPayload(pt.zcu); + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { return false; } return true; }, .Optional => { - const payload_ty = ty.optionalChild(mod); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + const payload_ty = ty.optionalChild(pt.zcu); + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { return false; } - if (ty.optionalReprIsPayload(mod)) { + if (ty.optionalReprIsPayload(pt.zcu)) { return false; } return true; @@ -11683,7 +11754,7 @@ fn isByRef(ty: Type, mod: *Module) bool { } } -fn isScalar(mod: *Module, ty: Type) bool { +fn isScalar(mod: *Zcu, ty: Type) bool { return switch (ty.zigTypeTag(mod)) { .Void, .Bool, @@ -11774,7 +11845,7 @@ const lt_errors_fn_name = "__zig_lt_errors_len"; /// Without this workaround, LLVM crashes with "unknown codeview register H1" /// https://github.com/llvm/llvm-project/issues/56484 fn needDbgVarWorkaround(o: *Object) bool { - const target = o.module.getTarget(); + const target = o.pt.zcu.getTarget(); if (target.os.tag == .windows and target.cpu.arch == .aarch64) { return true; } @@ -11817,14 +11888,14 @@ fn buildAllocaInner( return wip.conv(.unneeded, alloca, .ptr, ""); } -fn errUnionPayloadOffset(payload_ty: Type, mod: *Module) !u1 { - const err_int_ty = try mod.errorIntType(); - return @intFromBool(err_int_ty.abiAlignment(mod).compare(.gt, payload_ty.abiAlignment(mod))); +fn errUnionPayloadOffset(payload_ty: Type, pt: Zcu.PerThread) !u1 { + const err_int_ty = try pt.errorIntType(); + return @intFromBool(err_int_ty.abiAlignment(pt).compare(.gt, payload_ty.abiAlignment(pt))); } -fn errUnionErrorOffset(payload_ty: Type, mod: *Module) !u1 { - const err_int_ty = try mod.errorIntType(); - return @intFromBool(err_int_ty.abiAlignment(mod).compare(.lte, payload_ty.abiAlignment(mod))); +fn errUnionErrorOffset(payload_ty: Type, pt: Zcu.PerThread) !u1 { + const err_int_ty = try pt.errorIntType(); + return 
@intFromBool(err_int_ty.abiAlignment(pt).compare(.lte, payload_ty.abiAlignment(pt))); } /// Returns true for asm constraint (e.g. "=*m", "=r") if it accepts a memory location -- cgit v1.2.3 From ca02266157ee72e41068672c8ca6f928fcbf6fdf Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sat, 15 Jun 2024 19:57:47 -0400 Subject: Zcu: pass `PerThread` to intern pool string functions --- src/Compilation.zig | 89 +++--- src/InternPool.zig | 21 +- src/Sema.zig | 262 ++++++++-------- src/Value.zig | 4 +- src/Zcu.zig | 687 +--------------------------------------- src/Zcu/PerThread.zig | 725 +++++++++++++++++++++++++++++++++++++++++-- src/arch/wasm/CodeGen.zig | 10 +- src/codegen.zig | 2 +- src/codegen/llvm.zig | 30 +- src/codegen/spirv.zig | 8 +- src/link.zig | 10 +- src/link/C.zig | 4 +- src/link/Coff.zig | 10 +- src/link/Dwarf.zig | 2 +- src/link/Elf.zig | 6 +- src/link/Elf/ZigObject.zig | 16 +- src/link/MachO.zig | 6 +- src/link/MachO/ZigObject.zig | 17 +- src/link/Plan9.zig | 14 +- src/link/Wasm.zig | 11 +- src/link/Wasm/ZigObject.zig | 52 ++-- src/mutable_value.zig | 2 +- 22 files changed, 1025 insertions(+), 963 deletions(-) (limited to 'src/codegen/llvm.zig') diff --git a/src/Compilation.zig b/src/Compilation.zig index d3ff338080..1f4c425bc5 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -29,8 +29,6 @@ const wasi_libc = @import("wasi_libc.zig"); const fatal = @import("main.zig").fatal; const clangMain = @import("main.zig").clangMain; const Zcu = @import("Zcu.zig"); -/// Deprecated; use `Zcu`. -const Module = Zcu; const Sema = @import("Sema.zig"); const InternPool = @import("InternPool.zig"); const Cache = std.Build.Cache; @@ -50,7 +48,7 @@ gpa: Allocator, arena: Allocator, /// Not every Compilation compiles .zig code! For example you could do `zig build-exe foo.o`. /// TODO: rename to zcu: ?*Zcu -module: ?*Module, +module: ?*Zcu, /// Contains different state depending on whether the Compilation uses /// incremental or whole cache mode. cache_use: CacheUse, @@ -120,7 +118,7 @@ astgen_work_queue: std.fifo.LinearFifo(Zcu.File.Index, .Dynamic), /// These jobs are to inspect the file system stat() and if the embedded file has changed /// on disk, mark the corresponding Decl outdated and queue up an `analyze_decl` /// task for it. -embed_file_work_queue: std.fifo.LinearFifo(*Module.EmbedFile, .Dynamic), +embed_file_work_queue: std.fifo.LinearFifo(*Zcu.EmbedFile, .Dynamic), /// The ErrorMsg memory is owned by the `CObject`, using Compilation's general purpose allocator. /// This data is accessed by multiple threads and is protected by `mutex`. 
@@ -252,7 +250,7 @@ pub const Emit = struct { }; pub const default_stack_protector_buffer_size = target_util.default_stack_protector_buffer_size; -pub const SemaError = Module.SemaError; +pub const SemaError = Zcu.SemaError; pub const CRTFile = struct { lock: Cache.Lock, @@ -1138,7 +1136,7 @@ pub const CreateOptions = struct { pdb_source_path: ?[]const u8 = null, /// (Windows) PDB output path pdb_out_path: ?[]const u8 = null, - error_limit: ?Compilation.Module.ErrorInt = null, + error_limit: ?Zcu.ErrorInt = null, global_cc_argv: []const []const u8 = &.{}, pub const Entry = link.File.OpenOptions.Entry; @@ -1344,7 +1342,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil const main_mod = options.main_mod orelse options.root_mod; const comp = try arena.create(Compilation); - const opt_zcu: ?*Module = if (have_zcu) blk: { + const opt_zcu: ?*Zcu = if (have_zcu) blk: { // Pre-open the directory handles for cached ZIR code so that it does not need // to redundantly happen for each AstGen operation. const zir_sub_dir = "z"; @@ -1362,8 +1360,8 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil .path = try options.global_cache_directory.join(arena, &[_][]const u8{zir_sub_dir}), }; - const emit_h: ?*Module.GlobalEmitH = if (options.emit_h) |loc| eh: { - const eh = try arena.create(Module.GlobalEmitH); + const emit_h: ?*Zcu.GlobalEmitH = if (options.emit_h) |loc| eh: { + const eh = try arena.create(Zcu.GlobalEmitH); eh.* = .{ .loc = loc }; break :eh eh; } else null; @@ -1386,7 +1384,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil .builtin_modules = null, // `builtin_mod` is set }); - const zcu = try arena.create(Module); + const zcu = try arena.create(Zcu); zcu.* = .{ .gpa = gpa, .comp = comp, @@ -1434,7 +1432,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil .c_object_work_queue = std.fifo.LinearFifo(*CObject, .Dynamic).init(gpa), .win32_resource_work_queue = if (build_options.only_core_functionality) {} else std.fifo.LinearFifo(*Win32Resource, .Dynamic).init(gpa), .astgen_work_queue = std.fifo.LinearFifo(Zcu.File.Index, .Dynamic).init(gpa), - .embed_file_work_queue = std.fifo.LinearFifo(*Module.EmbedFile, .Dynamic).init(gpa), + .embed_file_work_queue = std.fifo.LinearFifo(*Zcu.EmbedFile, .Dynamic).init(gpa), .c_source_files = options.c_source_files, .rc_source_files = options.rc_source_files, .cache_parent = cache, @@ -2626,7 +2624,7 @@ fn reportMultiModuleErrors(zcu: *Zcu) !void { var num_errors: u32 = 0; const max_errors = 5; // Attach the "some omitted" note to the final error message - var last_err: ?*Module.ErrorMsg = null; + var last_err: ?*Zcu.ErrorMsg = null; for (zcu.import_table.values(), 0..) |file, file_index_usize| { if (!file.multi_pkg) continue; @@ -2642,13 +2640,13 @@ fn reportMultiModuleErrors(zcu: *Zcu) !void { const omitted = file.references.items.len -| max_notes; const num_notes = file.references.items.len - omitted; - const notes = try gpa.alloc(Module.ErrorMsg, if (omitted > 0) num_notes + 1 else num_notes); + const notes = try gpa.alloc(Zcu.ErrorMsg, if (omitted > 0) num_notes + 1 else num_notes); errdefer gpa.free(notes); for (notes[0..num_notes], file.references.items[0..num_notes], 0..) 
|*note, ref, i| { errdefer for (notes[0..i]) |*n| n.deinit(gpa); note.* = switch (ref) { - .import => |import| try Module.ErrorMsg.init( + .import => |import| try Zcu.ErrorMsg.init( gpa, .{ .base_node_inst = try ip.trackZir(gpa, import.file, .main_struct_inst), @@ -2657,7 +2655,7 @@ fn reportMultiModuleErrors(zcu: *Zcu) !void { "imported from module {s}", .{zcu.fileByIndex(import.file).mod.fully_qualified_name}, ), - .root => |pkg| try Module.ErrorMsg.init( + .root => |pkg| try Zcu.ErrorMsg.init( gpa, .{ .base_node_inst = try ip.trackZir(gpa, file_index, .main_struct_inst), @@ -2671,7 +2669,7 @@ fn reportMultiModuleErrors(zcu: *Zcu) !void { errdefer for (notes[0..num_notes]) |*n| n.deinit(gpa); if (omitted > 0) { - notes[num_notes] = try Module.ErrorMsg.init( + notes[num_notes] = try Zcu.ErrorMsg.init( gpa, .{ .base_node_inst = try ip.trackZir(gpa, file_index, .main_struct_inst), @@ -2683,7 +2681,7 @@ fn reportMultiModuleErrors(zcu: *Zcu) !void { } errdefer if (omitted > 0) notes[num_notes].deinit(gpa); - const err = try Module.ErrorMsg.create( + const err = try Zcu.ErrorMsg.create( gpa, .{ .base_node_inst = try ip.trackZir(gpa, file_index, .main_struct_inst), @@ -2706,7 +2704,7 @@ fn reportMultiModuleErrors(zcu: *Zcu) !void { // There isn't really any meaningful place to put this note, so just attach it to the // last failed file - var note = try Module.ErrorMsg.init( + var note = try Zcu.ErrorMsg.init( gpa, err.src_loc, "{} more errors omitted", @@ -3095,10 +3093,10 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { const values = zcu.compile_log_sources.values(); // First one will be the error; subsequent ones will be notes. const src_loc = values[0].src(); - const err_msg: Module.ErrorMsg = .{ + const err_msg: Zcu.ErrorMsg = .{ .src_loc = src_loc, .msg = "found compile log statement", - .notes = try gpa.alloc(Module.ErrorMsg, zcu.compile_log_sources.count() - 1), + .notes = try gpa.alloc(Zcu.ErrorMsg, zcu.compile_log_sources.count() - 1), }; defer gpa.free(err_msg.notes); @@ -3166,9 +3164,9 @@ pub const ErrorNoteHashContext = struct { }; pub fn addModuleErrorMsg( - mod: *Module, + mod: *Zcu, eb: *ErrorBundle.Wip, - module_err_msg: Module.ErrorMsg, + module_err_msg: Zcu.ErrorMsg, all_references: *const std.AutoHashMapUnmanaged(InternPool.AnalUnit, Zcu.ResolvedReference), ) !void { const gpa = eb.gpa; @@ -3299,7 +3297,7 @@ pub fn addModuleErrorMsg( } } -pub fn addZirErrorMessages(eb: *ErrorBundle.Wip, file: *Module.File) !void { +pub fn addZirErrorMessages(eb: *ErrorBundle.Wip, file: *Zcu.File) !void { assert(file.zir_loaded); assert(file.tree_loaded); assert(file.source_loaded); @@ -3378,7 +3376,7 @@ pub fn performAllTheWork( const path_digest = zcu.filePathDigest(file_index); const root_decl = zcu.fileRootDecl(file_index); const file = zcu.fileByIndex(file_index); - comp.thread_pool.spawnWg(&comp.astgen_wait_group, workerAstGenFile, .{ + comp.thread_pool.spawnWgId(&comp.astgen_wait_group, workerAstGenFile, .{ comp, file, file_index, path_digest, root_decl, zir_prog_node, &comp.astgen_wait_group, .root, }); } @@ -3587,22 +3585,22 @@ fn processOneJob(tid: usize, comp: *Compilation, job: Job, prog_node: std.Progre defer named_frame.end(); const gpa = comp.gpa; - const zcu = comp.module.?; - const decl = zcu.declPtr(decl_index); + const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = @enumFromInt(tid) }; + const decl = pt.zcu.declPtr(decl_index); const lf = comp.bin_file.?; - lf.updateDeclLineNumber(zcu, decl_index) catch |err| { - try 
zcu.failed_analysis.ensureUnusedCapacity(gpa, 1); - zcu.failed_analysis.putAssumeCapacityNoClobber( + lf.updateDeclLineNumber(pt, decl_index) catch |err| { + try pt.zcu.failed_analysis.ensureUnusedCapacity(gpa, 1); + pt.zcu.failed_analysis.putAssumeCapacityNoClobber( InternPool.AnalUnit.wrap(.{ .decl = decl_index }), try Zcu.ErrorMsg.create( gpa, - decl.navSrcLoc(zcu), + decl.navSrcLoc(pt.zcu), "unable to update line number: {s}", .{@errorName(err)}, ), ); decl.analysis = .codegen_failure; - try zcu.retryable_failures.append(gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index })); + try pt.zcu.retryable_failures.append(gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index })); }; }, .analyze_mod => |mod| { @@ -4049,6 +4047,7 @@ const AstGenSrc = union(enum) { }; fn workerAstGenFile( + tid: usize, comp: *Compilation, file: *Zcu.File, file_index: Zcu.File.Index, @@ -4061,8 +4060,8 @@ fn workerAstGenFile( const child_prog_node = prog_node.start(file.sub_file_path, 0); defer child_prog_node.end(); - const zcu = comp.module.?; - zcu.astGenFile(file, file_index, path_digest, root_decl) catch |err| switch (err) { + const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = @enumFromInt(tid) }; + pt.astGenFile(file, file_index, path_digest, root_decl) catch |err| switch (err) { error.AnalysisFail => return, else => { file.status = .retryable_failure; @@ -4097,15 +4096,15 @@ fn workerAstGenFile( comp.mutex.lock(); defer comp.mutex.unlock(); - const res = zcu.importFile(file, import_path) catch continue; + const res = pt.zcu.importFile(file, import_path) catch continue; if (!res.is_pkg) { - res.file.addReference(zcu.*, .{ .import = .{ + res.file.addReference(pt.zcu.*, .{ .import = .{ .file = file_index, .token = item.data.token, } }) catch continue; } - const imported_path_digest = zcu.filePathDigest(res.file_index); - const imported_root_decl = zcu.fileRootDecl(res.file_index); + const imported_path_digest = pt.zcu.filePathDigest(res.file_index); + const imported_root_decl = pt.zcu.fileRootDecl(res.file_index); break :blk .{ res, imported_path_digest, imported_root_decl }; }; if (import_result.is_new) { @@ -4116,7 +4115,7 @@ fn workerAstGenFile( .importing_file = file_index, .import_tok = item.data.token, } }; - comp.thread_pool.spawnWg(wg, workerAstGenFile, .{ + comp.thread_pool.spawnWgId(wg, workerAstGenFile, .{ comp, import_result.file, import_result.file_index, imported_path_digest, imported_root_decl, prog_node, wg, sub_src, }); } @@ -4127,7 +4126,7 @@ fn workerAstGenFile( fn workerUpdateBuiltinZigFile( comp: *Compilation, mod: *Package.Module, - file: *Module.File, + file: *Zcu.File, ) void { Builtin.populateFile(comp, mod, file) catch |err| { comp.mutex.lock(); @@ -4139,7 +4138,7 @@ fn workerUpdateBuiltinZigFile( }; } -fn workerCheckEmbedFile(comp: *Compilation, embed_file: *Module.EmbedFile) void { +fn workerCheckEmbedFile(comp: *Compilation, embed_file: *Zcu.EmbedFile) void { comp.detectEmbedFileUpdate(embed_file) catch |err| { comp.reportRetryableEmbedFileError(embed_file, err) catch |oom| switch (oom) { // Swallowing this error is OK because it's implied to be OOM when @@ -4150,7 +4149,7 @@ fn workerCheckEmbedFile(comp: *Compilation, embed_file: *Module.EmbedFile) void }; } -fn detectEmbedFileUpdate(comp: *Compilation, embed_file: *Module.EmbedFile) !void { +fn detectEmbedFileUpdate(comp: *Compilation, embed_file: *Zcu.EmbedFile) !void { const mod = comp.module.?; const ip = &mod.intern_pool; var file = try embed_file.owner.root.openFile(embed_file.sub_file_path.toSlice(ip), .{}); @@ 
-4477,7 +4476,7 @@ fn reportRetryableAstGenError( const file = zcu.fileByIndex(file_index); file.status = .retryable_failure; - const src_loc: Module.LazySrcLoc = switch (src) { + const src_loc: Zcu.LazySrcLoc = switch (src) { .root => .{ .base_node_inst = try zcu.intern_pool.trackZir(gpa, file_index, .main_struct_inst), .offset = .entire_file, @@ -4488,7 +4487,7 @@ fn reportRetryableAstGenError( }, }; - const err_msg = try Module.ErrorMsg.create(gpa, src_loc, "unable to load '{}{s}': {s}", .{ + const err_msg = try Zcu.ErrorMsg.create(gpa, src_loc, "unable to load '{}{s}': {s}", .{ file.mod.root, file.sub_file_path, @errorName(err), }); errdefer err_msg.destroy(gpa); @@ -4502,14 +4501,14 @@ fn reportRetryableAstGenError( fn reportRetryableEmbedFileError( comp: *Compilation, - embed_file: *Module.EmbedFile, + embed_file: *Zcu.EmbedFile, err: anyerror, ) error{OutOfMemory}!void { const mod = comp.module.?; const gpa = mod.gpa; const src_loc = embed_file.src_loc; const ip = &mod.intern_pool; - const err_msg = try Module.ErrorMsg.create(gpa, src_loc, "unable to load '{}{s}': {s}", .{ + const err_msg = try Zcu.ErrorMsg.create(gpa, src_loc, "unable to load '{}{s}': {s}", .{ embed_file.owner.root, embed_file.sub_file_path.toSlice(ip), @errorName(err), diff --git a/src/InternPool.zig b/src/InternPool.zig index 1338743182..97fd35bf20 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -4539,7 +4539,7 @@ pub fn init(ip: *InternPool, gpa: Allocator) !void { assert(ip.items.len == 0); // Reserve string index 0 for an empty string. - assert((try ip.getOrPutString(gpa, "", .no_embedded_nulls)) == .empty); + assert((try ip.getOrPutString(gpa, .main, "", .no_embedded_nulls)) == .empty); // So that we can use `catch unreachable` below. try ip.items.ensureUnusedCapacity(gpa, static_keys.len); @@ -5986,6 +5986,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All ); const string = try ip.getOrPutTrailingString( gpa, + tid, @intCast(len_including_sentinel), .maybe_embedded_nulls, ); @@ -6865,6 +6866,7 @@ pub fn getFuncInstance( return finishFuncInstance( ip, gpa, + tid, generic_owner, func_index, func_extra_index, @@ -6879,7 +6881,7 @@ pub fn getFuncInstance( pub fn getFuncInstanceIes( ip: *InternPool, gpa: Allocator, - _: Zcu.PerThread.Id, + tid: Zcu.PerThread.Id, arg: GetFuncInstanceKey, ) Allocator.Error!Index { // Validate input parameters. 
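A second strand runs through the preceding Compilation.zig hunks (`performAllTheWork`, `processOneJob`, `workerAstGenFile`): workers are now spawned with `spawnWgId`, which hands each callback a numeric thread id, and the worker's first act is to wrap it into a `Zcu.PerThread` via `.tid = @enumFromInt(tid)`. Entry points such as `astGenFile` correspondingly migrate from methods on `Zcu` to methods on `Zcu.PerThread` (note the growth of src/Zcu/PerThread.zig in the diffstat). A toy model of that hand-off, with all names and bodies invented:

const std = @import("std");

const Zcu = struct {
    files_done: u32 = 0,

    const PerThread = struct {
        zcu: *Zcu,
        tid: Id,

        const Id = enum(usize) { main, _ };

        fn astGenFile(pt: PerThread) void {
            // The real method runs AstGen on one file; the toy just counts.
            pt.zcu.files_done += 1;
        }
    };
};

fn worker(tid: usize, zcu: *Zcu) void {
    // Mirrors the spawnWgId callbacks: the pool passes a plain integer id,
    // and the worker builds its PerThread handle on entry.
    const pt: Zcu.PerThread = .{ .zcu = zcu, .tid = @enumFromInt(tid) };
    pt.astGenFile();
}

pub fn main() void {
    var zcu: Zcu = .{};
    worker(0, &zcu); // a real thread pool would supply distinct ids
    std.debug.print("files: {d}\n", .{zcu.files_done});
}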
@@ -6994,6 +6996,7 @@ pub fn getFuncInstanceIes( return finishFuncInstance( ip, gpa, + tid, generic_owner, func_index, func_extra_index, @@ -7005,6 +7008,7 @@ pub fn getFuncInstanceIes( fn finishFuncInstance( ip: *InternPool, gpa: Allocator, + tid: Zcu.PerThread.Id, generic_owner: Index, func_index: Index, func_extra_index: u32, @@ -7036,7 +7040,7 @@ fn finishFuncInstance( // TODO: improve this name const decl = ip.declPtr(decl_index); - decl.name = try ip.getOrPutStringFmt(gpa, "{}__anon_{d}", .{ + decl.name = try ip.getOrPutStringFmt(gpa, tid, "{}__anon_{d}", .{ fn_owner_decl.name.fmt(ip), @intFromEnum(decl_index), }, .no_embedded_nulls); @@ -8782,18 +8786,20 @@ const EmbeddedNulls = enum { pub fn getOrPutString( ip: *InternPool, gpa: Allocator, + tid: Zcu.PerThread.Id, slice: []const u8, comptime embedded_nulls: EmbeddedNulls, ) Allocator.Error!embedded_nulls.StringType() { try ip.string_bytes.ensureUnusedCapacity(gpa, slice.len + 1); ip.string_bytes.appendSliceAssumeCapacity(slice); ip.string_bytes.appendAssumeCapacity(0); - return ip.getOrPutTrailingString(gpa, slice.len + 1, embedded_nulls); + return ip.getOrPutTrailingString(gpa, tid, slice.len + 1, embedded_nulls); } pub fn getOrPutStringFmt( ip: *InternPool, gpa: Allocator, + tid: Zcu.PerThread.Id, comptime format: []const u8, args: anytype, comptime embedded_nulls: EmbeddedNulls, @@ -8803,16 +8809,17 @@ pub fn getOrPutStringFmt( try ip.string_bytes.ensureUnusedCapacity(gpa, len); ip.string_bytes.writer(undefined).print(format, args) catch unreachable; ip.string_bytes.appendAssumeCapacity(0); - return ip.getOrPutTrailingString(gpa, len, embedded_nulls); + return ip.getOrPutTrailingString(gpa, tid, len, embedded_nulls); } pub fn getOrPutStringOpt( ip: *InternPool, gpa: Allocator, + tid: Zcu.PerThread.Id, slice: ?[]const u8, comptime embedded_nulls: EmbeddedNulls, ) Allocator.Error!embedded_nulls.OptionalStringType() { - const string = try getOrPutString(ip, gpa, slice orelse return .none, embedded_nulls); + const string = try getOrPutString(ip, gpa, tid, slice orelse return .none, embedded_nulls); return string.toOptional(); } @@ -8820,9 +8827,11 @@ pub fn getOrPutStringOpt( pub fn getOrPutTrailingString( ip: *InternPool, gpa: Allocator, + tid: Zcu.PerThread.Id, len: usize, comptime embedded_nulls: EmbeddedNulls, ) Allocator.Error!embedded_nulls.StringType() { + _ = tid; const string_bytes = &ip.string_bytes; const str_index: u32 = @intCast(string_bytes.items.len - len); if (len > 0 and string_bytes.getLast() == 0) { diff --git a/src/Sema.zig b/src/Sema.zig index dd8d2712ed..ee4ac3b703 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -2093,12 +2093,12 @@ pub fn setupErrorReturnTrace(sema: *Sema, block: *Block, last_arg_index: usize) const st_ptr = try err_trace_block.addTy(.alloc, try pt.singleMutPtrType(stack_trace_ty)); // st.instruction_addresses = &addrs; - const instruction_addresses_field_name = try ip.getOrPutString(gpa, "instruction_addresses", .no_embedded_nulls); + const instruction_addresses_field_name = try ip.getOrPutString(gpa, pt.tid, "instruction_addresses", .no_embedded_nulls); const addr_field_ptr = try sema.fieldPtr(&err_trace_block, src, st_ptr, instruction_addresses_field_name, src, true); try sema.storePtr2(&err_trace_block, src, addr_field_ptr, src, addrs_ptr, src, .store); // st.index = 0; - const index_field_name = try ip.getOrPutString(gpa, "index", .no_embedded_nulls); + const index_field_name = try ip.getOrPutString(gpa, pt.tid, "index", .no_embedded_nulls); const index_field_ptr = try 
sema.fieldPtr(&err_trace_block, src, st_ptr, index_field_name, src, true); try sema.storePtr2(&err_trace_block, src, index_field_ptr, src, .zero_usize, src, .store); @@ -2691,6 +2691,7 @@ fn getCaptures(sema: *Sema, block: *Block, type_src: LazySrcLoc, extra_index: us .decl_val => |str| capture: { const decl_name = try ip.getOrPutString( sema.gpa, + pt.tid, sema.code.nullTerminatedString(str), .no_embedded_nulls, ); @@ -2700,6 +2701,7 @@ fn getCaptures(sema: *Sema, block: *Block, type_src: LazySrcLoc, extra_index: us .decl_ref => |str| capture: { const decl_name = try ip.getOrPutString( sema.gpa, + pt.tid, sema.code.nullTerminatedString(str), .no_embedded_nulls, ); @@ -2847,7 +2849,7 @@ fn zirStructDecl( if (new_namespace_index.unwrap()) |ns| { const decls = sema.code.bodySlice(extra_index, decls_len); - try mod.scanNamespace(ns, decls, mod.declPtr(new_decl_index)); + try pt.scanNamespace(ns, decls, mod.declPtr(new_decl_index)); } try pt.finalizeAnonDecl(new_decl_index); @@ -2919,7 +2921,7 @@ fn createAnonymousDeclTypeNamed( }; try writer.writeByte(')'); - const name = try ip.getOrPutString(gpa, buf.items, .no_embedded_nulls); + const name = try ip.getOrPutString(gpa, pt.tid, buf.items, .no_embedded_nulls); try zcu.initNewAnonDecl(new_decl_index, val, name); return new_decl_index; }, @@ -2931,7 +2933,7 @@ fn createAnonymousDeclTypeNamed( .dbg_var_ptr, .dbg_var_val => { if (zir_data[i].str_op.operand != ref) continue; - const name = try ip.getOrPutStringFmt(gpa, "{}.{s}", .{ + const name = try ip.getOrPutStringFmt(gpa, pt.tid, "{}.{s}", .{ block.type_name_ctx.fmt(ip), zir_data[i].str_op.getStr(sema.code), }, .no_embedded_nulls); try zcu.initNewAnonDecl(new_decl_index, val, name); @@ -2952,7 +2954,7 @@ fn createAnonymousDeclTypeNamed( // This name is also used as the key in the parent namespace so it cannot be // renamed. - const name = ip.getOrPutStringFmt(gpa, "{}__{s}_{d}", .{ + const name = ip.getOrPutStringFmt(gpa, pt.tid, "{}__{s}_{d}", .{ block.type_name_ctx.fmt(ip), anon_prefix, @intFromEnum(new_decl_index), }, .no_embedded_nulls) catch unreachable; try zcu.initNewAnonDecl(new_decl_index, val, name); @@ -3084,7 +3086,7 @@ fn zirEnumDecl( errdefer if (!done) if (new_namespace_index.unwrap()) |ns| mod.destroyNamespace(ns); if (new_namespace_index.unwrap()) |ns| { - try mod.scanNamespace(ns, decls, new_decl); + try pt.scanNamespace(ns, decls, new_decl); } // We've finished the initial construction of this type, and are about to perform analysis. 
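The Sema churn above all follows from one signature change back in InternPool.zig: `getOrPutString`, `getOrPutStringFmt`, `getOrPutStringOpt`, and `getOrPutTrailingString` each gain a `tid: Zcu.PerThread.Id` parameter immediately after the allocator, and every call site now forwards `pt.tid`. For now `getOrPutTrailingString` discards it (`_ = tid;`), presumably so the plumbing is in place before the pool's string storage itself becomes thread-aware. Below is a self-contained stand-in with the same call shape; `Tid` and `MiniPool` are invented for illustration and are not the real InternPool.

const std = @import("std");

const Tid = enum(u8) { main, _ };

const MiniPool = struct {
    bytes: std.ArrayListUnmanaged(u8) = .{},

    fn getOrPutString(
        pool: *MiniPool,
        gpa: std.mem.Allocator,
        tid: Tid,
        slice: []const u8,
    ) !u32 {
        _ = tid; // accepted but unused, mirroring getOrPutTrailingString
        const index: u32 = @intCast(pool.bytes.items.len);
        try pool.bytes.appendSlice(gpa, slice);
        try pool.bytes.append(gpa, 0); // null-terminate, like string_bytes
        return index;
    }
};

test "tid threads through interning calls" {
    const gpa = std.testing.allocator;
    var pool: MiniPool = .{};
    defer pool.bytes.deinit(gpa);
    const idx = try pool.getOrPutString(gpa, .main, "len");
    try std.testing.expectEqual(@as(u32, 0), idx);
}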
@@ -3169,7 +3171,7 @@ fn zirEnumDecl( const field_name_zir = sema.code.nullTerminatedString(field_name_index); extra_index += 2; // field name, doc comment - const field_name = try mod.intern_pool.getOrPutString(gpa, field_name_zir, .no_embedded_nulls); + const field_name = try mod.intern_pool.getOrPutString(gpa, pt.tid, field_name_zir, .no_embedded_nulls); const value_src: LazySrcLoc = .{ .base_node_inst = tracked_inst, @@ -3352,7 +3354,7 @@ fn zirUnionDecl( if (new_namespace_index.unwrap()) |ns| { const decls = sema.code.bodySlice(extra_index, decls_len); - try mod.scanNamespace(ns, decls, mod.declPtr(new_decl_index)); + try pt.scanNamespace(ns, decls, mod.declPtr(new_decl_index)); } try pt.finalizeAnonDecl(new_decl_index); @@ -3441,7 +3443,7 @@ fn zirOpaqueDecl( if (new_namespace_index.unwrap()) |ns| { const decls = sema.code.bodySlice(extra_index, decls_len); - try mod.scanNamespace(ns, decls, mod.declPtr(new_decl_index)); + try pt.scanNamespace(ns, decls, mod.declPtr(new_decl_index)); } try pt.finalizeAnonDecl(new_decl_index); @@ -3470,7 +3472,7 @@ fn zirErrorSetDecl( while (extra_index < extra_index_end) : (extra_index += 2) { // +2 to skip over doc_string const name_index: Zir.NullTerminatedString = @enumFromInt(sema.code.extra[extra_index]); const name = sema.code.nullTerminatedString(name_index); - const name_ip = try mod.intern_pool.getOrPutString(gpa, name, .no_embedded_nulls); + const name_ip = try mod.intern_pool.getOrPutString(gpa, pt.tid, name, .no_embedded_nulls); _ = try mod.getErrorValue(name_ip); const result = names.getOrPutAssumeCapacity(name_ip); assert(!result.found_existing); // verified in AstGen @@ -3634,7 +3636,7 @@ fn indexablePtrLen( const is_pointer_to = object_ty.isSinglePointer(mod); const indexable_ty = if (is_pointer_to) object_ty.childType(mod) else object_ty; try checkIndexable(sema, block, src, indexable_ty); - const field_name = try mod.intern_pool.getOrPutString(sema.gpa, "len", .no_embedded_nulls); + const field_name = try mod.intern_pool.getOrPutString(sema.gpa, pt.tid, "len", .no_embedded_nulls); return sema.fieldVal(block, src, object, field_name, src); } @@ -3649,7 +3651,7 @@ fn indexablePtrLenOrNone( const operand_ty = sema.typeOf(operand); try checkMemOperand(sema, block, src, operand_ty); if (operand_ty.ptrSize(mod) == .Many) return .none; - const field_name = try mod.intern_pool.getOrPutString(sema.gpa, "len", .no_embedded_nulls); + const field_name = try mod.intern_pool.getOrPutString(sema.gpa, pt.tid, "len", .no_embedded_nulls); return sema.fieldVal(block, src, operand, field_name, src); } @@ -4405,7 +4407,7 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. 
} if (!object_ty.indexableHasLen(mod)) continue; - break :l try sema.fieldVal(block, arg_src, object, try ip.getOrPutString(gpa, "len", .no_embedded_nulls), arg_src); + break :l try sema.fieldVal(block, arg_src, object, try ip.getOrPutString(gpa, pt.tid, "len", .no_embedded_nulls), arg_src); }; const arg_len = try sema.coerce(block, Type.usize, arg_len_uncoerced, arg_src); if (len == .none) { @@ -4797,6 +4799,7 @@ fn validateUnionInit( const field_ptr_extra = sema.code.extraData(Zir.Inst.Field, field_ptr_data.payload_index).data; const field_name = try mod.intern_pool.getOrPutString( gpa, + pt.tid, sema.code.nullTerminatedString(field_ptr_extra.field_name_start), .no_embedded_nulls, ); @@ -4942,6 +4945,7 @@ fn validateStructInit( struct_ptr_zir_ref = field_ptr_extra.lhs; const field_name = try ip.getOrPutString( gpa, + pt.tid, sema.code.nullTerminatedString(field_ptr_extra.field_name_start), .no_embedded_nulls, ); @@ -5518,10 +5522,11 @@ fn failWithBadStructFieldAccess( field_src: LazySrcLoc, field_name: InternPool.NullTerminatedString, ) CompileError { - const zcu = sema.pt.zcu; + const pt = sema.pt; + const zcu = pt.zcu; const ip = &zcu.intern_pool; const decl = zcu.declPtr(struct_type.decl.unwrap().?); - const fqn = try decl.fullyQualifiedName(zcu); + const fqn = try decl.fullyQualifiedName(pt); const msg = msg: { const msg = try sema.errMsg( @@ -5544,12 +5549,13 @@ fn failWithBadUnionFieldAccess( field_src: LazySrcLoc, field_name: InternPool.NullTerminatedString, ) CompileError { - const zcu = sema.pt.zcu; + const pt = sema.pt; + const zcu = pt.zcu; const ip = &zcu.intern_pool; const gpa = sema.gpa; const decl = zcu.declPtr(union_obj.decl); - const fqn = try decl.fullyQualifiedName(zcu); + const fqn = try decl.fullyQualifiedName(pt); const msg = msg: { const msg = try sema.errMsg( @@ -5715,7 +5721,7 @@ fn zirStoreNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!v fn zirStr(sema: *Sema, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const bytes = sema.code.instructions.items(.data)[@intFromEnum(inst)].str.get(sema.code); return sema.addStrLit( - try sema.pt.zcu.intern_pool.getOrPutString(sema.gpa, bytes, .maybe_embedded_nulls), + try sema.pt.zcu.intern_pool.getOrPutString(sema.gpa, sema.pt.tid, bytes, .maybe_embedded_nulls), bytes.len, ); } @@ -6057,7 +6063,7 @@ fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileEr const path_digest = zcu.filePathDigest(result.file_index); const root_decl = zcu.fileRootDecl(result.file_index); - zcu.astGenFile(result.file, result.file_index, path_digest, root_decl) catch |err| + pt.astGenFile(result.file, result.file_index, path_digest, root_decl) catch |err| return sema.fail(&child_block, src, "C import failed: {s}", .{@errorName(err)}); try pt.ensureFileAnalyzed(result.file_index); @@ -6418,6 +6424,7 @@ fn zirExport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void const options_src = block.builtinCallArgSrc(inst_data.src_node, 1); const decl_name = try mod.intern_pool.getOrPutString( mod.gpa, + pt.tid, sema.code.nullTerminatedString(extra.decl_name), .no_embedded_nulls, ); @@ -6737,6 +6744,7 @@ fn zirDeclRef(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const src = block.tokenOffset(inst_data.src_tok); const decl_name = try mod.intern_pool.getOrPutString( sema.gpa, + pt.tid, inst_data.get(sema.code), .no_embedded_nulls, ); @@ -6751,6 +6759,7 @@ fn zirDeclVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const src = 
block.tokenOffset(inst_data.src_tok); const decl_name = try mod.intern_pool.getOrPutString( sema.gpa, + pt.tid, inst_data.get(sema.code), .no_embedded_nulls, ); @@ -6907,7 +6916,7 @@ pub fn analyzeSaveErrRetIndex(sema: *Sema, block: *Block) SemaError!Air.Inst.Ref const stack_trace_ty = try pt.getBuiltinType("StackTrace"); try stack_trace_ty.resolveFields(pt); - const field_name = try mod.intern_pool.getOrPutString(gpa, "index", .no_embedded_nulls); + const field_name = try mod.intern_pool.getOrPutString(gpa, pt.tid, "index", .no_embedded_nulls); const field_index = sema.structFieldIndex(block, stack_trace_ty, field_name, LazySrcLoc.unneeded) catch |err| switch (err) { error.AnalysisFail => @panic("std.builtin.StackTrace is corrupt"), error.GenericPoison, error.ComptimeReturn, error.ComptimeBreak => unreachable, @@ -6951,7 +6960,7 @@ fn popErrorReturnTrace( try stack_trace_ty.resolveFields(pt); const ptr_stack_trace_ty = try pt.singleMutPtrType(stack_trace_ty); const err_return_trace = try block.addTy(.err_return_trace, ptr_stack_trace_ty); - const field_name = try mod.intern_pool.getOrPutString(gpa, "index", .no_embedded_nulls); + const field_name = try mod.intern_pool.getOrPutString(gpa, pt.tid, "index", .no_embedded_nulls); const field_ptr = try sema.structFieldPtr(block, src, err_return_trace, field_name, src, stack_trace_ty, true); try sema.storePtr2(block, src, field_ptr, src, saved_error_trace_index, src, .store); } else if (is_non_error == null) { @@ -6977,7 +6986,7 @@ fn popErrorReturnTrace( try stack_trace_ty.resolveFields(pt); const ptr_stack_trace_ty = try pt.singleMutPtrType(stack_trace_ty); const err_return_trace = try then_block.addTy(.err_return_trace, ptr_stack_trace_ty); - const field_name = try mod.intern_pool.getOrPutString(gpa, "index", .no_embedded_nulls); + const field_name = try mod.intern_pool.getOrPutString(gpa, pt.tid, "index", .no_embedded_nulls); const field_ptr = try sema.structFieldPtr(&then_block, src, err_return_trace, field_name, src, stack_trace_ty, true); try sema.storePtr2(&then_block, src, field_ptr, src, saved_error_trace_index, src, .store); _ = try then_block.addBr(cond_block_inst, .void_value); @@ -7038,6 +7047,7 @@ fn zirCall( const object_ptr = try sema.resolveInst(extra.data.obj_ptr); const field_name = try mod.intern_pool.getOrPutString( sema.gpa, + pt.tid, sema.code.nullTerminatedString(extra.data.field_name_start), .no_embedded_nulls, ); @@ -7103,7 +7113,7 @@ fn zirCall( if (input_is_error or (pop_error_return_trace and return_ty.isError(mod))) { const stack_trace_ty = try pt.getBuiltinType("StackTrace"); try stack_trace_ty.resolveFields(pt); - const field_name = try mod.intern_pool.getOrPutString(sema.gpa, "index", .no_embedded_nulls); + const field_name = try mod.intern_pool.getOrPutString(sema.gpa, pt.tid, "index", .no_embedded_nulls); const field_index = try sema.structFieldIndex(block, stack_trace_ty, field_name, call_src); // Insert a save instruction before the arg resolution + call instructions we just generated @@ -8687,6 +8697,7 @@ fn zirErrorValue(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! 
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].str_tok; const name = try pt.zcu.intern_pool.getOrPutString( sema.gpa, + pt.tid, inst_data.get(sema.code), .no_embedded_nulls, ); @@ -8849,7 +8860,7 @@ fn zirEnumLiteral(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].str_tok; const name = inst_data.get(sema.code); return Air.internedToRef((try pt.intern(.{ - .enum_literal = try mod.intern_pool.getOrPutString(sema.gpa, name, .no_embedded_nulls), + .enum_literal = try mod.intern_pool.getOrPutString(sema.gpa, pt.tid, name, .no_embedded_nulls), }))); } @@ -9820,7 +9831,7 @@ fn funcCommon( const func_index = try ip.getExternFunc(gpa, pt.tid, .{ .ty = func_ty, .decl = sema.owner_decl_index, - .lib_name = try mod.intern_pool.getOrPutStringOpt(gpa, opt_lib_name, .no_embedded_nulls), + .lib_name = try mod.intern_pool.getOrPutStringOpt(gpa, pt.tid, opt_lib_name, .no_embedded_nulls), }); return finishFunc( sema, @@ -10281,6 +10292,7 @@ fn zirFieldVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const extra = sema.code.extraData(Zir.Inst.Field, inst_data.payload_index).data; const field_name = try mod.intern_pool.getOrPutString( sema.gpa, + pt.tid, sema.code.nullTerminatedString(extra.field_name_start), .no_embedded_nulls, ); @@ -10300,6 +10312,7 @@ fn zirFieldPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const extra = sema.code.extraData(Zir.Inst.Field, inst_data.payload_index).data; const field_name = try mod.intern_pool.getOrPutString( sema.gpa, + pt.tid, sema.code.nullTerminatedString(extra.field_name_start), .no_embedded_nulls, ); @@ -10319,6 +10332,7 @@ fn zirStructInitFieldPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compi const extra = sema.code.extraData(Zir.Inst.Field, inst_data.payload_index).data; const field_name = try mod.intern_pool.getOrPutString( sema.gpa, + pt.tid, sema.code.nullTerminatedString(extra.field_name_start), .no_embedded_nulls, ); @@ -13983,6 +13997,7 @@ fn zirRetErrValueCode(sema: *Sema, inst: Zir.Inst.Index) CompileError!Air.Inst.R const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].str_tok; const name = try mod.intern_pool.getOrPutString( sema.gpa, + pt.tid, inst_data.get(sema.code), .no_embedded_nulls, ); @@ -17716,7 +17731,7 @@ fn zirBuiltinSrc( .val = try pt.intern(.{ .aggregate = .{ .ty = array_ty, .storage = .{ - .bytes = try ip.getOrPutString(gpa, file_name, .maybe_embedded_nulls), + .bytes = try ip.getOrPutString(gpa, pt.tid, file_name, .maybe_embedded_nulls), }, } }), } }, @@ -17778,7 +17793,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod), - try ip.getOrPutString(gpa, "Fn", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "Fn", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(fn_info_decl_index); const fn_info_decl = mod.declPtr(fn_info_decl_index); @@ -17788,7 +17803,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, fn_info_ty.getNamespaceIndex(mod), - try ip.getOrPutString(gpa, "Param", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "Param", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(param_info_decl_index); const param_info_decl = mod.declPtr(param_info_decl_index); @@ -17890,7 +17905,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, 
type_info_ty.getNamespaceIndex(mod), - try ip.getOrPutString(gpa, "Int", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "Int", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(int_info_decl_index); const int_info_decl = mod.declPtr(int_info_decl_index); @@ -17918,7 +17933,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod), - try ip.getOrPutString(gpa, "Float", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "Float", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(float_info_decl_index); const float_info_decl = mod.declPtr(float_info_decl_index); @@ -17950,7 +17965,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, (try pt.getBuiltinType("Type")).getNamespaceIndex(mod), - try ip.getOrPutString(gpa, "Pointer", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "Pointer", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(decl_index); const decl = mod.declPtr(decl_index); @@ -17961,7 +17976,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, pointer_ty.getNamespaceIndex(mod), - try ip.getOrPutString(gpa, "Size", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "Size", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(decl_index); const decl = mod.declPtr(decl_index); @@ -18004,7 +18019,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod), - try ip.getOrPutString(gpa, "Array", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "Array", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(array_field_ty_decl_index); const array_field_ty_decl = mod.declPtr(array_field_ty_decl_index); @@ -18035,7 +18050,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod), - try ip.getOrPutString(gpa, "Vector", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "Vector", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(vector_field_ty_decl_index); const vector_field_ty_decl = mod.declPtr(vector_field_ty_decl_index); @@ -18064,7 +18079,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod), - try ip.getOrPutString(gpa, "Optional", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "Optional", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(optional_field_ty_decl_index); const optional_field_ty_decl = mod.declPtr(optional_field_ty_decl_index); @@ -18091,7 +18106,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod), - try ip.getOrPutString(gpa, "Error", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "Error", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(set_field_ty_decl_index); const set_field_ty_decl = mod.declPtr(set_field_ty_decl_index); @@ -18197,7 +18212,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod), - try ip.getOrPutString(gpa, "ErrorUnion", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "ErrorUnion", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(error_union_field_ty_decl_index); const error_union_field_ty_decl = mod.declPtr(error_union_field_ty_decl_index); @@ -18227,7 +18242,7 @@ fn zirTypeInfo(sema: 
*Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod), - try ip.getOrPutString(gpa, "EnumField", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "EnumField", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(enum_field_ty_decl_index); const enum_field_ty_decl = mod.declPtr(enum_field_ty_decl_index); @@ -18324,7 +18339,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod), - try ip.getOrPutString(gpa, "Enum", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "Enum", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(type_enum_ty_decl_index); const type_enum_ty_decl = mod.declPtr(type_enum_ty_decl_index); @@ -18356,7 +18371,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod), - try ip.getOrPutString(gpa, "Union", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "Union", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(type_union_ty_decl_index); const type_union_ty_decl = mod.declPtr(type_union_ty_decl_index); @@ -18368,7 +18383,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod), - try ip.getOrPutString(gpa, "UnionField", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "UnionField", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(union_field_ty_decl_index); const union_field_ty_decl = mod.declPtr(union_field_ty_decl_index); @@ -18473,7 +18488,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, (try pt.getBuiltinType("Type")).getNamespaceIndex(mod), - try ip.getOrPutString(gpa, "ContainerLayout", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "ContainerLayout", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(decl_index); const decl = mod.declPtr(decl_index); @@ -18506,7 +18521,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod), - try ip.getOrPutString(gpa, "Struct", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "Struct", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(type_struct_ty_decl_index); const type_struct_ty_decl = mod.declPtr(type_struct_ty_decl_index); @@ -18518,7 +18533,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod), - try ip.getOrPutString(gpa, "StructField", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "StructField", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(struct_field_ty_decl_index); const struct_field_ty_decl = mod.declPtr(struct_field_ty_decl_index); @@ -18540,7 +18555,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const field_name = if (anon_struct_type.names.len != 0) anon_struct_type.names.get(ip)[field_index] else - try ip.getOrPutStringFmt(gpa, "{d}", .{field_index}, .no_embedded_nulls); + try ip.getOrPutStringFmt(gpa, pt.tid, "{d}", .{field_index}, .no_embedded_nulls); const field_name_len = field_name.length(ip); const new_decl_ty = try pt.arrayType(.{ .len = field_name_len, @@ -18600,7 +18615,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const field_name = if (struct_type.fieldName(ip, field_index).unwrap()) |field_name| field_name else - try 
ip.getOrPutStringFmt(gpa, "{d}", .{field_index}, .no_embedded_nulls); + try ip.getOrPutStringFmt(gpa, pt.tid, "{d}", .{field_index}, .no_embedded_nulls); const field_name_len = field_name.length(ip); const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]); const field_init = struct_type.fieldInit(ip, field_index); @@ -18706,7 +18721,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, (try pt.getBuiltinType("Type")).getNamespaceIndex(mod), - try ip.getOrPutString(gpa, "ContainerLayout", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "ContainerLayout", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(decl_index); const decl = mod.declPtr(decl_index); @@ -18742,7 +18757,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod), - try ip.getOrPutString(gpa, "Opaque", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "Opaque", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(type_opaque_ty_decl_index); const type_opaque_ty_decl = mod.declPtr(type_opaque_ty_decl_index); @@ -18786,7 +18801,7 @@ fn typeInfoDecls( block, src, type_info_ty.getNamespaceIndex(mod), - try mod.intern_pool.getOrPutString(gpa, "Declaration", .no_embedded_nulls), + try mod.intern_pool.getOrPutString(gpa, pt.tid, "Declaration", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(declaration_ty_decl_index); const declaration_ty_decl = mod.declPtr(declaration_ty_decl_index); @@ -19541,6 +19556,7 @@ fn zirRetErrValue( const src = block.tokenOffset(inst_data.src_tok); const err_name = try mod.intern_pool.getOrPutString( sema.gpa, + pt.tid, inst_data.get(sema.code), .no_embedded_nulls, ); @@ -20251,6 +20267,7 @@ fn zirStructInit( const field_type_extra = sema.code.extraData(Zir.Inst.FieldType, field_type_data.payload_index).data; const field_name = try ip.getOrPutString( gpa, + pt.tid, sema.code.nullTerminatedString(field_type_extra.name_start), .no_embedded_nulls, ); @@ -20292,6 +20309,7 @@ fn zirStructInit( const field_type_extra = sema.code.extraData(Zir.Inst.FieldType, field_type_data.payload_index).data; const field_name = try ip.getOrPutString( gpa, + pt.tid, sema.code.nullTerminatedString(field_type_extra.name_start), .no_embedded_nulls, ); @@ -20581,7 +20599,7 @@ fn structInitAnon( }, }; - field_name.* = try mod.intern_pool.getOrPutString(gpa, name, .no_embedded_nulls); + field_name.* = try mod.intern_pool.getOrPutString(gpa, pt.tid, name, .no_embedded_nulls); const init = try sema.resolveInst(item.data.init); field_ty.* = sema.typeOf(init).toIntern(); @@ -20958,7 +20976,7 @@ fn zirStructInitFieldType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Comp }; const aggregate_ty = wrapped_aggregate_ty.optEuBaseType(mod); const zir_field_name = sema.code.nullTerminatedString(extra.name_start); - const field_name = try ip.getOrPutString(sema.gpa, zir_field_name, .no_embedded_nulls); + const field_name = try ip.getOrPutString(sema.gpa, pt.tid, zir_field_name, .no_embedded_nulls); return sema.fieldType(block, aggregate_ty, field_name, field_name_src, ty_src); } @@ -21344,11 +21362,11 @@ fn zirReify( const struct_type = ip.loadStructType(ip.typeOf(union_val.val)); const signedness_val = try Value.fromInterned(union_val.val).fieldValue( pt, - struct_type.nameIndex(ip, try ip.getOrPutString(gpa, "signedness", .no_embedded_nulls)).?, + struct_type.nameIndex(ip, try ip.getOrPutString(gpa, pt.tid, "signedness", .no_embedded_nulls)).?, ); const bits_val = 
try Value.fromInterned(union_val.val).fieldValue( pt, - struct_type.nameIndex(ip, try ip.getOrPutString(gpa, "bits", .no_embedded_nulls)).?, + struct_type.nameIndex(ip, try ip.getOrPutString(gpa, pt.tid, "bits", .no_embedded_nulls)).?, ); const signedness = mod.toEnum(std.builtin.Signedness, signedness_val); @@ -21360,11 +21378,11 @@ fn zirReify( const struct_type = ip.loadStructType(ip.typeOf(union_val.val)); const len_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "len", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "len", .no_embedded_nulls), ).?); const child_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "child", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "child", .no_embedded_nulls), ).?); const len: u32 = @intCast(try len_val.toUnsignedIntSema(pt)); @@ -21382,7 +21400,7 @@ fn zirReify( const struct_type = ip.loadStructType(ip.typeOf(union_val.val)); const bits_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "bits", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "bits", .no_embedded_nulls), ).?); const bits: u16 = @intCast(try bits_val.toUnsignedIntSema(pt)); @@ -21400,35 +21418,35 @@ fn zirReify( const struct_type = ip.loadStructType(ip.typeOf(union_val.val)); const size_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "size", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "size", .no_embedded_nulls), ).?); const is_const_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "is_const", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "is_const", .no_embedded_nulls), ).?); const is_volatile_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "is_volatile", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "is_volatile", .no_embedded_nulls), ).?); const alignment_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "alignment", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "alignment", .no_embedded_nulls), ).?); const address_space_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "address_space", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "address_space", .no_embedded_nulls), ).?); const child_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "child", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "child", .no_embedded_nulls), ).?); const is_allowzero_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "is_allowzero", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "is_allowzero", .no_embedded_nulls), ).?); const sentinel_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "sentinel", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "sentinel", .no_embedded_nulls), ).?); if (!try sema.intFitsInType(alignment_val, Type.u32, null)) { @@ -21505,15 +21523,15 @@ fn zirReify( const struct_type = ip.loadStructType(ip.typeOf(union_val.val)); const len_val = try 
Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "len", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "len", .no_embedded_nulls), ).?); const child_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "child", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "child", .no_embedded_nulls), ).?); const sentinel_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "sentinel", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "sentinel", .no_embedded_nulls), ).?); const len = try len_val.toUnsignedIntSema(pt); @@ -21534,7 +21552,7 @@ fn zirReify( const struct_type = ip.loadStructType(ip.typeOf(union_val.val)); const child_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "child", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "child", .no_embedded_nulls), ).?); const child_ty = child_val.toType(); @@ -21546,11 +21564,11 @@ fn zirReify( const struct_type = ip.loadStructType(ip.typeOf(union_val.val)); const error_set_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "error_set", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "error_set", .no_embedded_nulls), ).?); const payload_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "payload", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "payload", .no_embedded_nulls), ).?); const error_set_ty = error_set_val.toType(); @@ -21579,7 +21597,7 @@ fn zirReify( const elem_struct_type = ip.loadStructType(ip.typeOf(elem_val.toIntern())); const name_val = try elem_val.fieldValue(pt, elem_struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "name", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "name", .no_embedded_nulls), ).?); const name = try sema.sliceToIpString(block, src, name_val, .{ @@ -21601,23 +21619,23 @@ fn zirReify( const struct_type = ip.loadStructType(ip.typeOf(union_val.val)); const layout_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "layout", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "layout", .no_embedded_nulls), ).?); const backing_integer_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "backing_integer", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "backing_integer", .no_embedded_nulls), ).?); const fields_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "fields", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "fields", .no_embedded_nulls), ).?); const decls_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "decls", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "decls", .no_embedded_nulls), ).?); const is_tuple_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "is_tuple", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "is_tuple", .no_embedded_nulls), ).?); const layout = mod.toEnum(std.builtin.Type.ContainerLayout, layout_val); @@ -21641,19 +21659,19 @@ fn zirReify( const struct_type = 
ip.loadStructType(ip.typeOf(union_val.val)); const tag_type_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "tag_type", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "tag_type", .no_embedded_nulls), ).?); const fields_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "fields", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "fields", .no_embedded_nulls), ).?); const decls_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "decls", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "decls", .no_embedded_nulls), ).?); const is_exhaustive_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "is_exhaustive", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "is_exhaustive", .no_embedded_nulls), ).?); if (try decls_val.sliceLen(pt) > 0) { @@ -21670,7 +21688,7 @@ fn zirReify( const struct_type = ip.loadStructType(ip.typeOf(union_val.val)); const decls_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "decls", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "decls", .no_embedded_nulls), ).?); // Decls @@ -21707,19 +21725,19 @@ fn zirReify( const struct_type = ip.loadStructType(ip.typeOf(union_val.val)); const layout_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "layout", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "layout", .no_embedded_nulls), ).?); const tag_type_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "tag_type", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "tag_type", .no_embedded_nulls), ).?); const fields_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "fields", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "fields", .no_embedded_nulls), ).?); const decls_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "decls", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "decls", .no_embedded_nulls), ).?); if (try decls_val.sliceLen(pt) > 0) { @@ -21737,23 +21755,23 @@ fn zirReify( const struct_type = ip.loadStructType(ip.typeOf(union_val.val)); const calling_convention_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "calling_convention", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "calling_convention", .no_embedded_nulls), ).?); const is_generic_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "is_generic", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "is_generic", .no_embedded_nulls), ).?); const is_var_args_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "is_var_args", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "is_var_args", .no_embedded_nulls), ).?); const return_type_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "return_type", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "return_type", 
.no_embedded_nulls), ).?); const params_slice_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "params", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "params", .no_embedded_nulls), ).?); const is_generic = is_generic_val.toBool(); @@ -21783,15 +21801,15 @@ fn zirReify( const elem_struct_type = ip.loadStructType(ip.typeOf(elem_val.toIntern())); const param_is_generic_val = try elem_val.fieldValue(pt, elem_struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "is_generic", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "is_generic", .no_embedded_nulls), ).?); const param_is_noalias_val = try elem_val.fieldValue(pt, elem_struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "is_noalias", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "is_noalias", .no_embedded_nulls), ).?); const opt_param_type_val = try elem_val.fieldValue(pt, elem_struct_type.nameIndex( ip, - try ip.getOrPutString(gpa, "type", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "type", .no_embedded_nulls), ).?); if (param_is_generic_val.toBool()) { @@ -22535,7 +22553,7 @@ fn zirTypeName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const ty_src = block.builtinCallArgSrc(inst_data.src_node, 0); const ty = try sema.resolveType(block, ty_src, inst_data.operand); - const type_name = try ip.getOrPutStringFmt(sema.gpa, "{}", .{ty.fmt(pt)}, .no_embedded_nulls); + const type_name = try ip.getOrPutStringFmt(sema.gpa, pt.tid, "{}", .{ty.fmt(pt)}, .no_embedded_nulls); return sema.addNullTerminatedStrLit(type_name); } @@ -24143,18 +24161,18 @@ fn resolveExportOptions( const section_src = block.src(.{ .init_field_section = src.offset.node_offset_builtin_call_arg.builtin_call_node }); const visibility_src = block.src(.{ .init_field_visibility = src.offset.node_offset_builtin_call_arg.builtin_call_node }); - const name_operand = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "name", .no_embedded_nulls), name_src); + const name_operand = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, pt.tid, "name", .no_embedded_nulls), name_src); const name = try sema.toConstString(block, name_src, name_operand, .{ .needed_comptime_reason = "name of exported value must be comptime-known", }); - const linkage_operand = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "linkage", .no_embedded_nulls), linkage_src); + const linkage_operand = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, pt.tid, "linkage", .no_embedded_nulls), linkage_src); const linkage_val = try sema.resolveConstDefinedValue(block, linkage_src, linkage_operand, .{ .needed_comptime_reason = "linkage of exported value must be comptime-known", }); const linkage = mod.toEnum(std.builtin.GlobalLinkage, linkage_val); - const section_operand = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "section", .no_embedded_nulls), section_src); + const section_operand = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, pt.tid, "section", .no_embedded_nulls), section_src); const section_opt_val = try sema.resolveConstDefinedValue(block, section_src, section_operand, .{ .needed_comptime_reason = "linksection of exported value must be comptime-known", }); @@ -24165,7 +24183,7 @@ fn resolveExportOptions( else null; - const visibility_operand = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "visibility", .no_embedded_nulls), visibility_src); + const 
visibility_operand = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, pt.tid, "visibility", .no_embedded_nulls), visibility_src); const visibility_val = try sema.resolveConstDefinedValue(block, visibility_src, visibility_operand, .{ .needed_comptime_reason = "visibility of exported value must be comptime-known", }); @@ -24182,9 +24200,9 @@ fn resolveExportOptions( } return .{ - .name = try ip.getOrPutString(gpa, name, .no_embedded_nulls), + .name = try ip.getOrPutString(gpa, pt.tid, name, .no_embedded_nulls), .linkage = linkage, - .section = try ip.getOrPutStringOpt(gpa, section, .no_embedded_nulls), + .section = try ip.getOrPutStringOpt(gpa, pt.tid, section, .no_embedded_nulls), .visibility = visibility, }; } @@ -25821,7 +25839,7 @@ fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void const runtime_src = rs: { const ptr_val = try sema.resolveDefinedValue(block, dest_src, dest_ptr) orelse break :rs dest_src; - const len_air_ref = try sema.fieldVal(block, src, dest_ptr, try ip.getOrPutString(gpa, "len", .no_embedded_nulls), dest_src); + const len_air_ref = try sema.fieldVal(block, src, dest_ptr, try ip.getOrPutString(gpa, pt.tid, "len", .no_embedded_nulls), dest_src); const len_val = (try sema.resolveDefinedValue(block, dest_src, len_air_ref)) orelse break :rs dest_src; const len_u64 = (try len_val.getUnsignedIntAdvanced(pt, .sema)).?; const len = try sema.usizeCast(block, dest_src, len_u64); @@ -25952,7 +25970,7 @@ fn zirVarExtended( .ty = var_ty.toIntern(), .init = init_val, .decl = sema.owner_decl_index, - .lib_name = try mod.intern_pool.getOrPutStringOpt(sema.gpa, lib_name, .no_embedded_nulls), + .lib_name = try mod.intern_pool.getOrPutStringOpt(sema.gpa, pt.tid, lib_name, .no_embedded_nulls), .is_extern = small.is_extern, .is_const = small.is_const, .is_threadlocal = small.is_threadlocal, @@ -26323,17 +26341,17 @@ fn resolvePrefetchOptions( const locality_src = block.src(.{ .init_field_locality = src.offset.node_offset_builtin_call_arg.builtin_call_node }); const cache_src = block.src(.{ .init_field_cache = src.offset.node_offset_builtin_call_arg.builtin_call_node }); - const rw = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "rw", .no_embedded_nulls), rw_src); + const rw = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, pt.tid, "rw", .no_embedded_nulls), rw_src); const rw_val = try sema.resolveConstDefinedValue(block, rw_src, rw, .{ .needed_comptime_reason = "prefetch read/write must be comptime-known", }); - const locality = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "locality", .no_embedded_nulls), locality_src); + const locality = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, pt.tid, "locality", .no_embedded_nulls), locality_src); const locality_val = try sema.resolveConstDefinedValue(block, locality_src, locality, .{ .needed_comptime_reason = "prefetch locality must be comptime-known", }); - const cache = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "cache", .no_embedded_nulls), cache_src); + const cache = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, pt.tid, "cache", .no_embedded_nulls), cache_src); const cache_val = try sema.resolveConstDefinedValue(block, cache_src, cache, .{ .needed_comptime_reason = "prefetch cache must be comptime-known", }); @@ -26397,23 +26415,23 @@ fn resolveExternOptions( const linkage_src = block.src(.{ .init_field_linkage = src.offset.node_offset_builtin_call_arg.builtin_call_node }); 
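
Every hunk in this stretch is the same mechanical migration: the InternPool string-interning entry points (getOrPutString, getOrPutStringFmt, getOrPutStringOpt, and the getOrPutTrailingString variant that appears in the Value.zig and Zcu.zig hunks below) now take the calling thread's id, which in turn is why functions that used to accept a bare *Zcu must now receive the full Zcu.PerThread. A minimal sketch of the call-site pattern, using a hypothetical simplified signature rather than the real InternPool API:

    // Hypothetical, simplified shape of the migrated entry point; the real
    // function has a richer error set and more options.
    pub fn getOrPutString(
        ip: *InternPool,
        gpa: std.mem.Allocator,
        tid: Zcu.PerThread.Id, // new parameter: selects per-thread state
        bytes: []const u8,
        embedded_nulls: enum { no_embedded_nulls, maybe_embedded_nulls },
    ) Allocator.Error!NullTerminatedString {
        _ = .{ ip, gpa, tid, bytes, embedded_nulls };
        // ... intern `bytes` using the state owned by `tid` ...
    }

    // Call sites therefore change from
    //     try ip.getOrPutString(gpa, name, .no_embedded_nulls)
    // to
    //     try ip.getOrPutString(gpa, pt.tid, name, .no_embedded_nulls)
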
const thread_local_src = block.src(.{ .init_field_thread_local = src.offset.node_offset_builtin_call_arg.builtin_call_node }); - const name_ref = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "name", .no_embedded_nulls), name_src); + const name_ref = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, pt.tid, "name", .no_embedded_nulls), name_src); const name = try sema.toConstString(block, name_src, name_ref, .{ .needed_comptime_reason = "name of the extern symbol must be comptime-known", }); - const library_name_inst = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "library_name", .no_embedded_nulls), library_src); + const library_name_inst = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, pt.tid, "library_name", .no_embedded_nulls), library_src); const library_name_val = try sema.resolveConstDefinedValue(block, library_src, library_name_inst, .{ .needed_comptime_reason = "library in which extern symbol is must be comptime-known", }); - const linkage_ref = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "linkage", .no_embedded_nulls), linkage_src); + const linkage_ref = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, pt.tid, "linkage", .no_embedded_nulls), linkage_src); const linkage_val = try sema.resolveConstDefinedValue(block, linkage_src, linkage_ref, .{ .needed_comptime_reason = "linkage of the extern symbol must be comptime-known", }); const linkage = mod.toEnum(std.builtin.GlobalLinkage, linkage_val); - const is_thread_local = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "is_thread_local", .no_embedded_nulls), thread_local_src); + const is_thread_local = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, pt.tid, "is_thread_local", .no_embedded_nulls), thread_local_src); const is_thread_local_val = try sema.resolveConstDefinedValue(block, thread_local_src, is_thread_local, .{ .needed_comptime_reason = "threadlocality of the extern symbol must be comptime-known", }); @@ -26438,8 +26456,8 @@ fn resolveExternOptions( } return .{ - .name = try ip.getOrPutString(gpa, name, .no_embedded_nulls), - .library_name = try ip.getOrPutStringOpt(gpa, library_name, .no_embedded_nulls), + .name = try ip.getOrPutString(gpa, pt.tid, name, .no_embedded_nulls), + .library_name = try ip.getOrPutStringOpt(gpa, pt.tid, library_name, .no_embedded_nulls), .linkage = linkage, .is_thread_local = is_thread_local_val.toBool(), }; @@ -27052,7 +27070,7 @@ fn preparePanicId(sema: *Sema, block: *Block, panic_id: Module.PanicId) !InternP block, LazySrcLoc.unneeded, panic_messages_ty.getNamespaceIndex(mod), - try mod.intern_pool.getOrPutString(gpa, @tagName(panic_id), .no_embedded_nulls), + try mod.intern_pool.getOrPutString(gpa, pt.tid, @tagName(panic_id), .no_embedded_nulls), ) catch |err| switch (err) { error.AnalysisFail => @panic("std.builtin.panic_messages is corrupt"), error.GenericPoison, error.ComptimeReturn, error.ComptimeBreak => unreachable, @@ -31745,7 +31763,7 @@ fn coerceTupleToStruct( .anon_struct_type => |anon_struct_type| if (anon_struct_type.names.len > 0) anon_struct_type.names.get(ip)[tuple_field_index] else - try ip.getOrPutStringFmt(sema.gpa, "{d}", .{tuple_field_index}, .no_embedded_nulls), + try ip.getOrPutStringFmt(sema.gpa, pt.tid, "{d}", .{tuple_field_index}, .no_embedded_nulls), .struct_type => ip.loadStructType(inst_ty.toIntern()).field_names.get(ip)[tuple_field_index], else => unreachable, }; @@ -31858,13 +31876,13 @@ fn coerceTupleToTuple( 
.anon_struct_type => |anon_struct_type| if (anon_struct_type.names.len > 0) anon_struct_type.names.get(ip)[field_i] else - try ip.getOrPutStringFmt(sema.gpa, "{d}", .{field_i}, .no_embedded_nulls), + try ip.getOrPutStringFmt(sema.gpa, pt.tid, "{d}", .{field_i}, .no_embedded_nulls), .struct_type => s: { const struct_type = ip.loadStructType(inst_ty.toIntern()); if (struct_type.field_names.len > 0) { break :s struct_type.field_names.get(ip)[field_i]; } else { - break :s try ip.getOrPutStringFmt(sema.gpa, "{d}", .{field_i}, .no_embedded_nulls); + break :s try ip.getOrPutStringFmt(sema.gpa, pt.tid, "{d}", .{field_i}, .no_embedded_nulls); } }, else => unreachable, @@ -34849,7 +34867,7 @@ fn resolvePeerTypesInner( const result_buf = try sema.arena.create(PeerResolveResult); result_buf.* = result; const field_name = if (is_tuple) - try ip.getOrPutStringFmt(sema.gpa, "{d}", .{field_index}, .no_embedded_nulls) + try ip.getOrPutStringFmt(sema.gpa, pt.tid, "{d}", .{field_index}, .no_embedded_nulls) else field_names[field_index]; @@ -36066,7 +36084,7 @@ fn semaStructFields( // This string needs to outlive the ZIR code. if (opt_field_name_zir) |field_name_zir| { - const field_name = try ip.getOrPutString(gpa, field_name_zir, .no_embedded_nulls); + const field_name = try ip.getOrPutString(gpa, pt.tid, field_name_zir, .no_embedded_nulls); assert(struct_type.addFieldName(ip, field_name) == null); } @@ -36567,7 +36585,7 @@ fn semaUnionFields(pt: Zcu.PerThread, arena: Allocator, union_type: InternPool.L } // This string needs to outlive the ZIR code. - const field_name = try ip.getOrPutString(gpa, field_name_zir, .no_embedded_nulls); + const field_name = try ip.getOrPutString(gpa, pt.tid, field_name_zir, .no_embedded_nulls); if (enum_field_names.len != 0) { enum_field_names[field_i] = field_name; } @@ -36716,9 +36734,10 @@ fn generateUnionTagTypeNumbered( const new_decl_index = try mod.allocateNewDecl(block.namespace); errdefer mod.destroyDecl(new_decl_index); - const fqn = try union_owner_decl.fullyQualifiedName(mod); + const fqn = try union_owner_decl.fullyQualifiedName(pt); const name = try ip.getOrPutStringFmt( gpa, + pt.tid, "@typeInfo({}).Union.tag_type.?", .{fqn.fmt(ip)}, .no_embedded_nulls, @@ -36764,11 +36783,12 @@ fn generateUnionTagTypeSimple( const gpa = sema.gpa; const new_decl_index = new_decl_index: { - const fqn = try union_owner_decl.fullyQualifiedName(mod); + const fqn = try union_owner_decl.fullyQualifiedName(pt); const new_decl_index = try mod.allocateNewDecl(block.namespace); errdefer mod.destroyDecl(new_decl_index); const name = try ip.getOrPutStringFmt( gpa, + pt.tid, "@typeInfo({}).Union.tag_type.?", .{fqn.fmt(ip)}, .no_embedded_nulls, diff --git a/src/Value.zig b/src/Value.zig index 21bb207b59..e47598fe0a 100644 --- a/src/Value.zig +++ b/src/Value.zig @@ -67,7 +67,7 @@ pub fn toIpString(val: Value, ty: Type, pt: Zcu.PerThread) !InternPool.NullTermi const byte: u8 = @intCast(Value.fromInterned(elem).toUnsignedInt(pt)); const len: usize = @intCast(ty.arrayLen(mod)); try ip.string_bytes.appendNTimes(mod.gpa, byte, len); - return ip.getOrPutTrailingString(mod.gpa, len, .no_embedded_nulls); + return ip.getOrPutTrailingString(mod.gpa, pt.tid, len, .no_embedded_nulls); }, } } @@ -118,7 +118,7 @@ fn arrayToIpString(val: Value, len_u64: u64, pt: Zcu.PerThread) !InternPool.Null const byte: u8 = @intCast(elem_val.toUnsignedInt(pt)); ip.string_bytes.appendAssumeCapacity(byte); } - return ip.getOrPutTrailingString(gpa, len, .no_embedded_nulls); + return ip.getOrPutTrailingString(gpa, pt.tid, 
len, .no_embedded_nulls); } pub fn fromInterned(i: InternPool.Index) Value { diff --git a/src/Zcu.zig b/src/Zcu.zig index bfc70815df..c4ebc6a36b 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -420,11 +420,11 @@ pub const Decl = struct { return zcu.namespacePtr(decl.src_namespace).renderFullyQualifiedDebugName(zcu, decl.name, writer); } - pub fn fullyQualifiedName(decl: Decl, zcu: *Zcu) !InternPool.NullTerminatedString { + pub fn fullyQualifiedName(decl: Decl, pt: Zcu.PerThread) !InternPool.NullTerminatedString { return if (decl.name_fully_qualified) decl.name else - zcu.namespacePtr(decl.src_namespace).fullyQualifiedName(zcu, decl.name); + pt.zcu.namespacePtr(decl.src_namespace).fullyQualifiedName(pt, decl.name); } pub fn typeOf(decl: Decl, zcu: *const Zcu) Type { @@ -688,9 +688,10 @@ pub const Namespace = struct { pub fn fullyQualifiedName( ns: Namespace, - zcu: *Zcu, + pt: Zcu.PerThread, name: InternPool.NullTerminatedString, ) !InternPool.NullTerminatedString { + const zcu = pt.zcu; const ip = &zcu.intern_pool; const count = count: { var count: usize = name.length(ip) + 1; @@ -723,7 +724,7 @@ pub const Namespace = struct { }; } - return ip.getOrPutTrailingString(gpa, ip.string_bytes.items.len - start, .no_embedded_nulls); + return ip.getOrPutTrailingString(gpa, pt.tid, ip.string_bytes.items.len - start, .no_embedded_nulls); } pub fn getType(ns: Namespace, zcu: *Zcu) Type { @@ -875,11 +876,12 @@ pub const File = struct { }; } - pub fn fullyQualifiedName(file: File, mod: *Module) !InternPool.NullTerminatedString { - const ip = &mod.intern_pool; + pub fn fullyQualifiedName(file: File, pt: Zcu.PerThread) !InternPool.NullTerminatedString { + const gpa = pt.zcu.gpa; + const ip = &pt.zcu.intern_pool; const start = ip.string_bytes.items.len; - try file.renderFullyQualifiedName(ip.string_bytes.writer(mod.gpa)); - return ip.getOrPutTrailingString(mod.gpa, ip.string_bytes.items.len - start, .no_embedded_nulls); + try file.renderFullyQualifiedName(ip.string_bytes.writer(gpa)); + return ip.getOrPutTrailingString(gpa, pt.tid, ip.string_bytes.items.len - start, .no_embedded_nulls); } pub fn fullPath(file: File, ally: Allocator) ![]u8 { @@ -2569,8 +2571,8 @@ pub fn declIsRoot(mod: *Module, decl_index: Decl.Index) bool { } // TODO https://github.com/ziglang/zig/issues/8643 -const data_has_safety_tag = @sizeOf(Zir.Inst.Data) != 8; -const HackDataLayout = extern struct { +pub const data_has_safety_tag = @sizeOf(Zir.Inst.Data) != 8; +pub const HackDataLayout = extern struct { data: [8]u8 align(@alignOf(Zir.Inst.Data)), safety_tag: u8, }; @@ -2580,291 +2582,11 @@ comptime { } } -pub fn astGenFile( - zcu: *Zcu, - file: *File, - /// This parameter is provided separately from `file` because it is not - /// safe to access `import_table` without a lock, and this index is needed - /// in the call to `updateZirRefs`. - file_index: File.Index, - path_digest: Cache.BinDigest, - opt_root_decl: Zcu.Decl.OptionalIndex, -) !void { - assert(!file.mod.isBuiltin()); - - const tracy = trace(@src()); - defer tracy.end(); - - const comp = zcu.comp; - const gpa = zcu.gpa; - - // In any case we need to examine the stat of the file to determine the course of action. 
- var source_file = try file.mod.root.openFile(file.sub_file_path, .{}); - defer source_file.close(); - - const stat = try source_file.stat(); - - const want_local_cache = file.mod == zcu.main_mod; - const hex_digest = Cache.binToHex(path_digest); - const cache_directory = if (want_local_cache) zcu.local_zir_cache else zcu.global_zir_cache; - const zir_dir = cache_directory.handle; - - // Determine whether we need to reload the file from disk and redo parsing and AstGen. - var lock: std.fs.File.Lock = switch (file.status) { - .never_loaded, .retryable_failure => lock: { - // First, load the cached ZIR code, if any. - log.debug("AstGen checking cache: {s} (local={}, digest={s})", .{ - file.sub_file_path, want_local_cache, &hex_digest, - }); - - break :lock .shared; - }, - .parse_failure, .astgen_failure, .success_zir => lock: { - const unchanged_metadata = - stat.size == file.stat.size and - stat.mtime == file.stat.mtime and - stat.inode == file.stat.inode; - - if (unchanged_metadata) { - log.debug("unmodified metadata of file: {s}", .{file.sub_file_path}); - return; - } - - log.debug("metadata changed: {s}", .{file.sub_file_path}); - - break :lock .exclusive; - }, - }; - - // We ask for a lock in order to coordinate with other zig processes. - // If another process is already working on this file, we will get the cached - // version. Likewise if we're working on AstGen and another process asks for - // the cached file, they'll get it. - const cache_file = while (true) { - break zir_dir.createFile(&hex_digest, .{ - .read = true, - .truncate = false, - .lock = lock, - }) catch |err| switch (err) { - error.NotDir => unreachable, // no dir components - error.InvalidUtf8 => unreachable, // it's a hex encoded name - error.InvalidWtf8 => unreachable, // it's a hex encoded name - error.BadPathName => unreachable, // it's a hex encoded name - error.NameTooLong => unreachable, // it's a fixed size name - error.PipeBusy => unreachable, // it's not a pipe - error.WouldBlock => unreachable, // not asking for non-blocking I/O - // There are no dir components, so you would think that this was - // unreachable, however we have observed on macOS two processes racing - // to do openat() with O_CREAT manifest in ENOENT. - error.FileNotFound => continue, - - else => |e| return e, // Retryable errors are handled at callsite. - }; - }; - defer cache_file.close(); - - while (true) { - update: { - // First we read the header to determine the lengths of arrays. - const header = cache_file.reader().readStruct(Zir.Header) catch |err| switch (err) { - // This can happen if Zig bails out of this function between creating - // the cached file and writing it. 
- error.EndOfStream => break :update, - else => |e| return e, - }; - const unchanged_metadata = - stat.size == header.stat_size and - stat.mtime == header.stat_mtime and - stat.inode == header.stat_inode; - - if (!unchanged_metadata) { - log.debug("AstGen cache stale: {s}", .{file.sub_file_path}); - break :update; - } - log.debug("AstGen cache hit: {s} instructions_len={d}", .{ - file.sub_file_path, header.instructions_len, - }); - - file.zir = loadZirCacheBody(gpa, header, cache_file) catch |err| switch (err) { - error.UnexpectedFileSize => { - log.warn("unexpected EOF reading cached ZIR for {s}", .{file.sub_file_path}); - break :update; - }, - else => |e| return e, - }; - file.zir_loaded = true; - file.stat = .{ - .size = header.stat_size, - .inode = header.stat_inode, - .mtime = header.stat_mtime, - }; - file.status = .success_zir; - log.debug("AstGen cached success: {s}", .{file.sub_file_path}); - - // TODO don't report compile errors until Sema @importFile - if (file.zir.hasCompileErrors()) { - { - comp.mutex.lock(); - defer comp.mutex.unlock(); - try zcu.failed_files.putNoClobber(gpa, file, null); - } - file.status = .astgen_failure; - return error.AnalysisFail; - } - return; - } - - // If we already have the exclusive lock then it is our job to update. - if (builtin.os.tag == .wasi or lock == .exclusive) break; - // Otherwise, unlock to give someone a chance to get the exclusive lock - // and then upgrade to an exclusive lock. - cache_file.unlock(); - lock = .exclusive; - try cache_file.lock(lock); - } - - // The cache is definitely stale so delete the contents to avoid an underwrite later. - cache_file.setEndPos(0) catch |err| switch (err) { - error.FileTooBig => unreachable, // 0 is not too big - - else => |e| return e, - }; - - zcu.lockAndClearFileCompileError(file); - - // If the previous ZIR does not have compile errors, keep it around - // in case parsing or new ZIR fails. In case of successful ZIR update - // at the end of this function we will free it. - // We keep the previous ZIR loaded so that we can use it - // for the update next time it does not have any compile errors. This avoids - // needlessly tossing out semantic analysis work when an error is - // temporarily introduced. - if (file.zir_loaded and !file.zir.hasCompileErrors()) { - assert(file.prev_zir == null); - const prev_zir_ptr = try gpa.create(Zir); - file.prev_zir = prev_zir_ptr; - prev_zir_ptr.* = file.zir; - file.zir = undefined; - file.zir_loaded = false; - } - file.unload(gpa); - - if (stat.size > std.math.maxInt(u32)) - return error.FileTooBig; - - const source = try gpa.allocSentinel(u8, @as(usize, @intCast(stat.size)), 0); - defer if (!file.source_loaded) gpa.free(source); - const amt = try source_file.readAll(source); - if (amt != stat.size) - return error.UnexpectedEndOfFile; - - file.stat = .{ - .size = stat.size, - .inode = stat.inode, - .mtime = stat.mtime, - }; - file.source = source; - file.source_loaded = true; - - file.tree = try Ast.parse(gpa, source, .zig); - file.tree_loaded = true; - - // Any potential AST errors are converted to ZIR errors here. 
- file.zir = try AstGen.generate(gpa, file.tree); - file.zir_loaded = true; - file.status = .success_zir; - log.debug("AstGen fresh success: {s}", .{file.sub_file_path}); - - const safety_buffer = if (data_has_safety_tag) - try gpa.alloc([8]u8, file.zir.instructions.len) - else - undefined; - defer if (data_has_safety_tag) gpa.free(safety_buffer); - const data_ptr = if (data_has_safety_tag) - if (file.zir.instructions.len == 0) - @as([*]const u8, undefined) - else - @as([*]const u8, @ptrCast(safety_buffer.ptr)) - else - @as([*]const u8, @ptrCast(file.zir.instructions.items(.data).ptr)); - if (data_has_safety_tag) { - // The `Data` union has a safety tag but in the file format we store it without. - for (file.zir.instructions.items(.data), 0..) |*data, i| { - const as_struct = @as(*const HackDataLayout, @ptrCast(data)); - safety_buffer[i] = as_struct.data; - } - } - - const header: Zir.Header = .{ - .instructions_len = @as(u32, @intCast(file.zir.instructions.len)), - .string_bytes_len = @as(u32, @intCast(file.zir.string_bytes.len)), - .extra_len = @as(u32, @intCast(file.zir.extra.len)), - - .stat_size = stat.size, - .stat_inode = stat.inode, - .stat_mtime = stat.mtime, - }; - var iovecs = [_]std.posix.iovec_const{ - .{ - .base = @as([*]const u8, @ptrCast(&header)), - .len = @sizeOf(Zir.Header), - }, - .{ - .base = @as([*]const u8, @ptrCast(file.zir.instructions.items(.tag).ptr)), - .len = file.zir.instructions.len, - }, - .{ - .base = data_ptr, - .len = file.zir.instructions.len * 8, - }, - .{ - .base = file.zir.string_bytes.ptr, - .len = file.zir.string_bytes.len, - }, - .{ - .base = @as([*]const u8, @ptrCast(file.zir.extra.ptr)), - .len = file.zir.extra.len * 4, - }, - }; - cache_file.writevAll(&iovecs) catch |err| { - log.warn("unable to write cached ZIR code for {}{s} to {}{s}: {s}", .{ - file.mod.root, file.sub_file_path, cache_directory, &hex_digest, @errorName(err), - }); - }; - - if (file.zir.hasCompileErrors()) { - { - comp.mutex.lock(); - defer comp.mutex.unlock(); - try zcu.failed_files.putNoClobber(gpa, file, null); - } - file.status = .astgen_failure; - return error.AnalysisFail; - } - - if (file.prev_zir) |prev_zir| { - try updateZirRefs(zcu, file, file_index, prev_zir.*); - // No need to keep previous ZIR. - prev_zir.deinit(gpa); - gpa.destroy(prev_zir); - file.prev_zir = null; - } - - if (opt_root_decl.unwrap()) |root_decl| { - // The root of this file must be re-analyzed, since the file has changed. - comp.mutex.lock(); - defer comp.mutex.unlock(); - - log.debug("outdated root Decl: {}", .{root_decl}); - try zcu.outdated_file_root.put(gpa, root_decl, {}); - } -} - pub fn loadZirCache(gpa: Allocator, cache_file: std.fs.File) !Zir { return loadZirCacheBody(gpa, try cache_file.reader().readStruct(Zir.Header), cache_file); } -fn loadZirCacheBody(gpa: Allocator, header: Zir.Header, cache_file: std.fs.File) !Zir { +pub fn loadZirCacheBody(gpa: Allocator, header: Zir.Header, cache_file: std.fs.File) !Zir { var instructions: std.MultiArrayList(Zir.Inst) = .{}; errdefer instructions.deinit(gpa); @@ -2930,127 +2652,6 @@ fn loadZirCacheBody(gpa: Allocator, header: Zir.Header, cache_file: std.fs.File) return zir; } -/// This is called from the AstGen thread pool, so must acquire -/// the Compilation mutex when acting on shared state. 
-fn updateZirRefs(zcu: *Module, file: *File, file_index: File.Index, old_zir: Zir) !void { - const gpa = zcu.gpa; - const new_zir = file.zir; - - var inst_map: std.AutoHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index) = .{}; - defer inst_map.deinit(gpa); - - try mapOldZirToNew(gpa, old_zir, new_zir, &inst_map); - - const old_tag = old_zir.instructions.items(.tag); - const old_data = old_zir.instructions.items(.data); - - // TODO: this should be done after all AstGen workers complete, to avoid - // iterating over this full set for every updated file. - for (zcu.intern_pool.tracked_insts.keys(), 0..) |*ti, idx_raw| { - const ti_idx: InternPool.TrackedInst.Index = @enumFromInt(idx_raw); - if (ti.file != file_index) continue; - const old_inst = ti.inst; - ti.inst = inst_map.get(ti.inst) orelse { - // Tracking failed for this instruction. Invalidate associated `src_hash` deps. - zcu.comp.mutex.lock(); - defer zcu.comp.mutex.unlock(); - log.debug("tracking failed for %{d}", .{old_inst}); - try zcu.markDependeeOutdated(.{ .src_hash = ti_idx }); - continue; - }; - - if (old_zir.getAssociatedSrcHash(old_inst)) |old_hash| hash_changed: { - if (new_zir.getAssociatedSrcHash(ti.inst)) |new_hash| { - if (std.zig.srcHashEql(old_hash, new_hash)) { - break :hash_changed; - } - log.debug("hash for (%{d} -> %{d}) changed: {} -> {}", .{ - old_inst, - ti.inst, - std.fmt.fmtSliceHexLower(&old_hash), - std.fmt.fmtSliceHexLower(&new_hash), - }); - } - // The source hash associated with this instruction changed - invalidate relevant dependencies. - zcu.comp.mutex.lock(); - defer zcu.comp.mutex.unlock(); - try zcu.markDependeeOutdated(.{ .src_hash = ti_idx }); - } - - // If this is a `struct_decl` etc, we must invalidate any outdated namespace dependencies. - const has_namespace = switch (old_tag[@intFromEnum(old_inst)]) { - .extended => switch (old_data[@intFromEnum(old_inst)].extended.opcode) { - .struct_decl, .union_decl, .opaque_decl, .enum_decl => true, - else => false, - }, - else => false, - }; - if (!has_namespace) continue; - - var old_names: std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void) = .{}; - defer old_names.deinit(zcu.gpa); - { - var it = old_zir.declIterator(old_inst); - while (it.next()) |decl_inst| { - const decl_name = old_zir.getDeclaration(decl_inst)[0].name; - switch (decl_name) { - .@"comptime", .@"usingnamespace", .unnamed_test, .decltest => continue, - _ => if (decl_name.isNamedTest(old_zir)) continue, - } - const name_zir = decl_name.toString(old_zir).?; - const name_ip = try zcu.intern_pool.getOrPutString( - zcu.gpa, - old_zir.nullTerminatedString(name_zir), - .no_embedded_nulls, - ); - try old_names.put(zcu.gpa, name_ip, {}); - } - } - var any_change = false; - { - var it = new_zir.declIterator(ti.inst); - while (it.next()) |decl_inst| { - const decl_name = old_zir.getDeclaration(decl_inst)[0].name; - switch (decl_name) { - .@"comptime", .@"usingnamespace", .unnamed_test, .decltest => continue, - _ => if (decl_name.isNamedTest(old_zir)) continue, - } - const name_zir = decl_name.toString(old_zir).?; - const name_ip = try zcu.intern_pool.getOrPutString( - zcu.gpa, - old_zir.nullTerminatedString(name_zir), - .no_embedded_nulls, - ); - if (!old_names.swapRemove(name_ip)) continue; - // Name added - any_change = true; - zcu.comp.mutex.lock(); - defer zcu.comp.mutex.unlock(); - try zcu.markDependeeOutdated(.{ .namespace_name = .{ - .namespace = ti_idx, - .name = name_ip, - } }); - } - } - // The only elements remaining in `old_names` now are any names which were removed. 
- for (old_names.keys()) |name_ip| { - any_change = true; - zcu.comp.mutex.lock(); - defer zcu.comp.mutex.unlock(); - try zcu.markDependeeOutdated(.{ .namespace_name = .{ - .namespace = ti_idx, - .name = name_ip, - } }); - } - - if (any_change) { - zcu.comp.mutex.lock(); - defer zcu.comp.mutex.unlock(); - try zcu.markDependeeOutdated(.{ .namespace = ti_idx }); - } - } -} - pub fn markDependeeOutdated(zcu: *Zcu, dependee: InternPool.Dependee) !void { log.debug("outdated dependee: {}", .{dependee}); var it = zcu.intern_pool.dependencyIterator(dependee); @@ -3695,268 +3296,6 @@ fn computePathDigest(zcu: *Zcu, mod: *Package.Module, sub_file_path: []const u8) return bin; } -pub fn scanNamespace( - zcu: *Zcu, - namespace_index: Namespace.Index, - decls: []const Zir.Inst.Index, - parent_decl: *Decl, -) Allocator.Error!void { - const tracy = trace(@src()); - defer tracy.end(); - - const gpa = zcu.gpa; - const namespace = zcu.namespacePtr(namespace_index); - - // For incremental updates, `scanDecl` wants to look up existing decls by their ZIR index rather - // than their name. We'll build an efficient mapping now, then discard the current `decls`. - var existing_by_inst: std.AutoHashMapUnmanaged(InternPool.TrackedInst.Index, Decl.Index) = .{}; - defer existing_by_inst.deinit(gpa); - - try existing_by_inst.ensureTotalCapacity(gpa, @intCast(namespace.decls.count())); - - for (namespace.decls.keys()) |decl_index| { - const decl = zcu.declPtr(decl_index); - existing_by_inst.putAssumeCapacityNoClobber(decl.zir_decl_index.unwrap().?, decl_index); - } - - var seen_decls: std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, void) = .{}; - defer seen_decls.deinit(gpa); - - try zcu.comp.work_queue.ensureUnusedCapacity(decls.len); - - namespace.decls.clearRetainingCapacity(); - try namespace.decls.ensureTotalCapacity(gpa, decls.len); - - namespace.usingnamespace_set.clearRetainingCapacity(); - - var scan_decl_iter: ScanDeclIter = .{ - .zcu = zcu, - .namespace_index = namespace_index, - .parent_decl = parent_decl, - .seen_decls = &seen_decls, - .existing_by_inst = &existing_by_inst, - .pass = .named, - }; - for (decls) |decl_inst| { - try scanDecl(&scan_decl_iter, decl_inst); - } - scan_decl_iter.pass = .unnamed; - for (decls) |decl_inst| { - try scanDecl(&scan_decl_iter, decl_inst); - } - - if (seen_decls.count() != namespace.decls.count()) { - // Do a pass over the namespace contents and remove any decls from the last update - // which were removed in this one. - var i: usize = 0; - while (i < namespace.decls.count()) { - const decl_index = namespace.decls.keys()[i]; - const decl = zcu.declPtr(decl_index); - if (!seen_decls.contains(decl.name)) { - // We must preserve namespace ordering for @typeInfo. - namespace.decls.orderedRemoveAt(i); - i -= 1; - } - } - } -} - -const ScanDeclIter = struct { - zcu: *Zcu, - namespace_index: Namespace.Index, - parent_decl: *Decl, - seen_decls: *std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, void), - existing_by_inst: *const std.AutoHashMapUnmanaged(InternPool.TrackedInst.Index, Decl.Index), - /// Decl scanning is run in two passes, so that we can detect when a generated - /// name would clash with an explicit name and use a different one. 
- pass: enum { named, unnamed }, - usingnamespace_index: usize = 0, - comptime_index: usize = 0, - unnamed_test_index: usize = 0, - - fn avoidNameConflict(iter: *ScanDeclIter, comptime fmt: []const u8, args: anytype) !InternPool.NullTerminatedString { - const zcu = iter.zcu; - const gpa = zcu.gpa; - const ip = &zcu.intern_pool; - var name = try ip.getOrPutStringFmt(gpa, fmt, args, .no_embedded_nulls); - var gop = try iter.seen_decls.getOrPut(gpa, name); - var next_suffix: u32 = 0; - while (gop.found_existing) { - name = try ip.getOrPutStringFmt(gpa, "{}_{d}", .{ name.fmt(ip), next_suffix }, .no_embedded_nulls); - gop = try iter.seen_decls.getOrPut(gpa, name); - next_suffix += 1; - } - return name; - } -}; - -fn scanDecl(iter: *ScanDeclIter, decl_inst: Zir.Inst.Index) Allocator.Error!void { - const tracy = trace(@src()); - defer tracy.end(); - - const zcu = iter.zcu; - const namespace_index = iter.namespace_index; - const namespace = zcu.namespacePtr(namespace_index); - const gpa = zcu.gpa; - const zir = namespace.fileScope(zcu).zir; - const ip = &zcu.intern_pool; - - const inst_data = zir.instructions.items(.data)[@intFromEnum(decl_inst)].declaration; - const extra = zir.extraData(Zir.Inst.Declaration, inst_data.payload_index); - const declaration = extra.data; - - // Every Decl needs a name. - const decl_name: InternPool.NullTerminatedString, const kind: Decl.Kind, const is_named_test: bool = switch (declaration.name) { - .@"comptime" => info: { - if (iter.pass != .unnamed) return; - const i = iter.comptime_index; - iter.comptime_index += 1; - break :info .{ - try iter.avoidNameConflict("comptime_{d}", .{i}), - .@"comptime", - false, - }; - }, - .@"usingnamespace" => info: { - // TODO: this isn't right! These should be considered unnamed. Name conflicts can happen here. - // The problem is, we need to preserve the decl ordering for `@typeInfo`. - // I'm not bothering to fix this now, since some upcoming changes will change this code significantly anyway. - if (iter.pass != .named) return; - const i = iter.usingnamespace_index; - iter.usingnamespace_index += 1; - break :info .{ - try iter.avoidNameConflict("usingnamespace_{d}", .{i}), - .@"usingnamespace", - false, - }; - }, - .unnamed_test => info: { - if (iter.pass != .unnamed) return; - const i = iter.unnamed_test_index; - iter.unnamed_test_index += 1; - break :info .{ - try iter.avoidNameConflict("test_{d}", .{i}), - .@"test", - false, - }; - }, - .decltest => info: { - // We consider these to be unnamed since the decl name can be adjusted to avoid conflicts if necessary. - if (iter.pass != .unnamed) return; - assert(declaration.flags.has_doc_comment); - const name = zir.nullTerminatedString(@enumFromInt(zir.extra[extra.end])); - break :info .{ - try iter.avoidNameConflict("decltest.{s}", .{name}), - .@"test", - true, - }; - }, - _ => if (declaration.name.isNamedTest(zir)) info: { - // We consider these to be unnamed since the decl name can be adjusted to avoid conflicts if necessary. 
- if (iter.pass != .unnamed) return; - break :info .{ - try iter.avoidNameConflict("test.{s}", .{zir.nullTerminatedString(declaration.name.toString(zir).?)}), - .@"test", - true, - }; - } else info: { - if (iter.pass != .named) return; - const name = try ip.getOrPutString( - gpa, - zir.nullTerminatedString(declaration.name.toString(zir).?), - .no_embedded_nulls, - ); - try iter.seen_decls.putNoClobber(gpa, name, {}); - break :info .{ - name, - .named, - false, - }; - }, - }; - - switch (kind) { - .@"usingnamespace" => try namespace.usingnamespace_set.ensureUnusedCapacity(gpa, 1), - .@"test" => try zcu.test_functions.ensureUnusedCapacity(gpa, 1), - else => {}, - } - - const parent_file_scope_index = iter.parent_decl.getFileScopeIndex(zcu); - const tracked_inst = try ip.trackZir(gpa, parent_file_scope_index, decl_inst); - - // We create a Decl for it regardless of analysis status. - - const prev_exported, const decl_index = if (iter.existing_by_inst.get(tracked_inst)) |decl_index| decl_index: { - // We need only update this existing Decl. - const decl = zcu.declPtr(decl_index); - const was_exported = decl.is_exported; - assert(decl.kind == kind); // ZIR tracking should preserve this - decl.name = decl_name; - decl.is_pub = declaration.flags.is_pub; - decl.is_exported = declaration.flags.is_export; - break :decl_index .{ was_exported, decl_index }; - } else decl_index: { - // Create and set up a new Decl. - const new_decl_index = try zcu.allocateNewDecl(namespace_index); - const new_decl = zcu.declPtr(new_decl_index); - new_decl.kind = kind; - new_decl.name = decl_name; - new_decl.is_pub = declaration.flags.is_pub; - new_decl.is_exported = declaration.flags.is_export; - new_decl.zir_decl_index = tracked_inst.toOptional(); - break :decl_index .{ false, new_decl_index }; - }; - - const decl = zcu.declPtr(decl_index); - - namespace.decls.putAssumeCapacityNoClobberContext(decl_index, {}, .{ .zcu = zcu }); - - const comp = zcu.comp; - const decl_mod = namespace.fileScope(zcu).mod; - const want_analysis = declaration.flags.is_export or switch (kind) { - .anon => unreachable, - .@"comptime" => true, - .@"usingnamespace" => a: { - namespace.usingnamespace_set.putAssumeCapacityNoClobber(decl_index, declaration.flags.is_pub); - break :a true; - }, - .named => false, - .@"test" => a: { - if (!comp.config.is_test) break :a false; - if (decl_mod != zcu.main_mod) break :a false; - if (is_named_test and comp.test_filters.len > 0) { - const decl_fqn = try namespace.fullyQualifiedName(zcu, decl_name); - const decl_fqn_slice = decl_fqn.toSlice(ip); - for (comp.test_filters) |test_filter| { - if (mem.indexOf(u8, decl_fqn_slice, test_filter)) |_| break; - } else break :a false; - } - zcu.test_functions.putAssumeCapacity(decl_index, {}); // may clobber on incremental update - break :a true; - }, - }; - - if (want_analysis) { - // We will not queue analysis if the decl has been analyzed on a previous update and - // `is_export` is unchanged. In this case, the incremental update mechanism will handle - // re-analysis for us if necessary. 
- if (prev_exported != declaration.flags.is_export or decl.analysis == .unreferenced) { - log.debug("scanDecl queue analyze_decl file='{s}' decl_name='{}' decl_index={d}", .{ - namespace.fileScope(zcu).sub_file_path, decl_name.fmt(ip), decl_index, - }); - comp.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = decl_index }); - } - } - - if (decl.getOwnedFunction(zcu) != null) { - // TODO this logic is insufficient; namespaces we don't re-scan may still require - // updated line numbers. Look into this! - // TODO Look into detecting when this would be unnecessary by storing enough state - // in `Decl` to notice that the line number did not change. - comp.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl_index }); - } -} - /// Cancel the creation of an anon decl and delete any references to it. /// If other decls depend on this decl, they must be aborted first. pub fn abortAnonDecl(mod: *Module, decl_index: Decl.Index) void { diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index 785a5d52e0..8cf6922345 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -5,6 +5,411 @@ tid: Id, pub const Id = if (builtin.single_threaded) enum { main } else enum(usize) { main, _ }; +pub fn astGenFile( + pt: Zcu.PerThread, + file: *Zcu.File, + /// This parameter is provided separately from `file` because it is not + /// safe to access `import_table` without a lock, and this index is needed + /// in the call to `updateZirRefs`. + file_index: Zcu.File.Index, + path_digest: Cache.BinDigest, + opt_root_decl: Zcu.Decl.OptionalIndex, +) !void { + assert(!file.mod.isBuiltin()); + + const tracy = trace(@src()); + defer tracy.end(); + + const zcu = pt.zcu; + const comp = zcu.comp; + const gpa = zcu.gpa; + + // In any case we need to examine the stat of the file to determine the course of action. + var source_file = try file.mod.root.openFile(file.sub_file_path, .{}); + defer source_file.close(); + + const stat = try source_file.stat(); + + const want_local_cache = file.mod == zcu.main_mod; + const hex_digest = Cache.binToHex(path_digest); + const cache_directory = if (want_local_cache) zcu.local_zir_cache else zcu.global_zir_cache; + const zir_dir = cache_directory.handle; + + // Determine whether we need to reload the file from disk and redo parsing and AstGen. + var lock: std.fs.File.Lock = switch (file.status) { + .never_loaded, .retryable_failure => lock: { + // First, load the cached ZIR code, if any. + log.debug("AstGen checking cache: {s} (local={}, digest={s})", .{ + file.sub_file_path, want_local_cache, &hex_digest, + }); + + break :lock .shared; + }, + .parse_failure, .astgen_failure, .success_zir => lock: { + const unchanged_metadata = + stat.size == file.stat.size and + stat.mtime == file.stat.mtime and + stat.inode == file.stat.inode; + + if (unchanged_metadata) { + log.debug("unmodified metadata of file: {s}", .{file.sub_file_path}); + return; + } + + log.debug("metadata changed: {s}", .{file.sub_file_path}); + + break :lock .exclusive; + }, + }; + + // We ask for a lock in order to coordinate with other zig processes. + // If another process is already working on this file, we will get the cached + // version. Likewise if we're working on AstGen and another process asks for + // the cached file, they'll get it. 
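
The loop that follows is a small inter-process protocol around the ZIR cache entry. A condensed sketch of just the lock negotiation, where tryReadFreshCache is a hypothetical stand-in for the inline header and stat comparison performed below (the real code also special-cases WASI, which lacks file locking):

    var lock: std.fs.File.Lock = .shared;
    const f = try zir_dir.createFile(&hex_digest, .{
        .read = true,
        .truncate = false,
        .lock = lock,
    });
    defer f.close();
    while (true) {
        if (try tryReadFreshCache(f)) return; // another process updated it first
        if (lock == .exclusive) break; // we hold the write lock; regenerating is our job
        // Drop the shared lock and queue for the exclusive one. After acquiring
        // it, loop around and re-check the cache before doing any work, since a
        // faster process may have regenerated the entry in the meantime.
        f.unlock();
        lock = .exclusive;
        try f.lock(lock);
    }
    // Fall through: re-read the source, re-parse, re-run AstGen, rewrite the cache.
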
+ const cache_file = while (true) { + break zir_dir.createFile(&hex_digest, .{ + .read = true, + .truncate = false, + .lock = lock, + }) catch |err| switch (err) { + error.NotDir => unreachable, // no dir components + error.InvalidUtf8 => unreachable, // it's a hex encoded name + error.InvalidWtf8 => unreachable, // it's a hex encoded name + error.BadPathName => unreachable, // it's a hex encoded name + error.NameTooLong => unreachable, // it's a fixed size name + error.PipeBusy => unreachable, // it's not a pipe + error.WouldBlock => unreachable, // not asking for non-blocking I/O + // There are no dir components, so you would think that this was + // unreachable, however we have observed on macOS two processes racing + // to do openat() with O_CREAT manifest in ENOENT. + error.FileNotFound => continue, + + else => |e| return e, // Retryable errors are handled at callsite. + }; + }; + defer cache_file.close(); + + while (true) { + update: { + // First we read the header to determine the lengths of arrays. + const header = cache_file.reader().readStruct(Zir.Header) catch |err| switch (err) { + // This can happen if Zig bails out of this function between creating + // the cached file and writing it. + error.EndOfStream => break :update, + else => |e| return e, + }; + const unchanged_metadata = + stat.size == header.stat_size and + stat.mtime == header.stat_mtime and + stat.inode == header.stat_inode; + + if (!unchanged_metadata) { + log.debug("AstGen cache stale: {s}", .{file.sub_file_path}); + break :update; + } + log.debug("AstGen cache hit: {s} instructions_len={d}", .{ + file.sub_file_path, header.instructions_len, + }); + + file.zir = Zcu.loadZirCacheBody(gpa, header, cache_file) catch |err| switch (err) { + error.UnexpectedFileSize => { + log.warn("unexpected EOF reading cached ZIR for {s}", .{file.sub_file_path}); + break :update; + }, + else => |e| return e, + }; + file.zir_loaded = true; + file.stat = .{ + .size = header.stat_size, + .inode = header.stat_inode, + .mtime = header.stat_mtime, + }; + file.status = .success_zir; + log.debug("AstGen cached success: {s}", .{file.sub_file_path}); + + // TODO don't report compile errors until Sema @importFile + if (file.zir.hasCompileErrors()) { + { + comp.mutex.lock(); + defer comp.mutex.unlock(); + try zcu.failed_files.putNoClobber(gpa, file, null); + } + file.status = .astgen_failure; + return error.AnalysisFail; + } + return; + } + + // If we already have the exclusive lock then it is our job to update. + if (builtin.os.tag == .wasi or lock == .exclusive) break; + // Otherwise, unlock to give someone a chance to get the exclusive lock + // and then upgrade to an exclusive lock. + cache_file.unlock(); + lock = .exclusive; + try cache_file.lock(lock); + } + + // The cache is definitely stale so delete the contents to avoid an underwrite later. + cache_file.setEndPos(0) catch |err| switch (err) { + error.FileTooBig => unreachable, // 0 is not too big + + else => |e| return e, + }; + + pt.lockAndClearFileCompileError(file); + + // If the previous ZIR does not have compile errors, keep it around + // in case parsing or new ZIR fails. In case of successful ZIR update + // at the end of this function we will free it. + // We keep the previous ZIR loaded so that we can use it + // for the update next time it does not have any compile errors. This avoids + // needlessly tossing out semantic analysis work when an error is + // temporarily introduced. 
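
Two serialization details in the code below deserve a note. In safe optimize modes the untagged Zir.Inst.Data union carries a hidden safety tag, so @sizeOf(Zir.Inst.Data) != 8; the cache stores only the raw 8 payload bytes, peeled off through Zcu.HackDataLayout (made pub in the Zcu.zig hunk above precisely for this call site). The entry written by writevAll is then a plain memory dump: header, instruction tags, stripped instruction data, string bytes, and the u32 extra array, in that order. A sketch of the tag-stripping trick on a toy union:

    const Data = union { pair: struct { lhs: u32, rhs: u32 }, int: u64 };
    const data_has_safety_tag = @sizeOf(Data) != 8;

    // Overlay that exposes the payload bytes without naming the active field.
    const HackDataLayout = extern struct {
        data: [8]u8 align(@alignOf(Data)),
        safety_tag: u8,
    };

    fn strippedBytes(data: *const Data) [8]u8 {
        if (data_has_safety_tag) {
            const as_struct: *const HackDataLayout = @ptrCast(data);
            return as_struct.data;
        }
        return @as(*const [8]u8, @ptrCast(data)).*;
    }
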
+ if (file.zir_loaded and !file.zir.hasCompileErrors()) { + assert(file.prev_zir == null); + const prev_zir_ptr = try gpa.create(Zir); + file.prev_zir = prev_zir_ptr; + prev_zir_ptr.* = file.zir; + file.zir = undefined; + file.zir_loaded = false; + } + file.unload(gpa); + + if (stat.size > std.math.maxInt(u32)) + return error.FileTooBig; + + const source = try gpa.allocSentinel(u8, @as(usize, @intCast(stat.size)), 0); + defer if (!file.source_loaded) gpa.free(source); + const amt = try source_file.readAll(source); + if (amt != stat.size) + return error.UnexpectedEndOfFile; + + file.stat = .{ + .size = stat.size, + .inode = stat.inode, + .mtime = stat.mtime, + }; + file.source = source; + file.source_loaded = true; + + file.tree = try Ast.parse(gpa, source, .zig); + file.tree_loaded = true; + + // Any potential AST errors are converted to ZIR errors here. + file.zir = try AstGen.generate(gpa, file.tree); + file.zir_loaded = true; + file.status = .success_zir; + log.debug("AstGen fresh success: {s}", .{file.sub_file_path}); + + const safety_buffer = if (Zcu.data_has_safety_tag) + try gpa.alloc([8]u8, file.zir.instructions.len) + else + undefined; + defer if (Zcu.data_has_safety_tag) gpa.free(safety_buffer); + const data_ptr = if (Zcu.data_has_safety_tag) + if (file.zir.instructions.len == 0) + @as([*]const u8, undefined) + else + @as([*]const u8, @ptrCast(safety_buffer.ptr)) + else + @as([*]const u8, @ptrCast(file.zir.instructions.items(.data).ptr)); + if (Zcu.data_has_safety_tag) { + // The `Data` union has a safety tag but in the file format we store it without. + for (file.zir.instructions.items(.data), 0..) |*data, i| { + const as_struct: *const Zcu.HackDataLayout = @ptrCast(data); + safety_buffer[i] = as_struct.data; + } + } + + const header: Zir.Header = .{ + .instructions_len = @as(u32, @intCast(file.zir.instructions.len)), + .string_bytes_len = @as(u32, @intCast(file.zir.string_bytes.len)), + .extra_len = @as(u32, @intCast(file.zir.extra.len)), + + .stat_size = stat.size, + .stat_inode = stat.inode, + .stat_mtime = stat.mtime, + }; + var iovecs = [_]std.posix.iovec_const{ + .{ + .base = @as([*]const u8, @ptrCast(&header)), + .len = @sizeOf(Zir.Header), + }, + .{ + .base = @as([*]const u8, @ptrCast(file.zir.instructions.items(.tag).ptr)), + .len = file.zir.instructions.len, + }, + .{ + .base = data_ptr, + .len = file.zir.instructions.len * 8, + }, + .{ + .base = file.zir.string_bytes.ptr, + .len = file.zir.string_bytes.len, + }, + .{ + .base = @as([*]const u8, @ptrCast(file.zir.extra.ptr)), + .len = file.zir.extra.len * 4, + }, + }; + cache_file.writevAll(&iovecs) catch |err| { + log.warn("unable to write cached ZIR code for {}{s} to {}{s}: {s}", .{ + file.mod.root, file.sub_file_path, cache_directory, &hex_digest, @errorName(err), + }); + }; + + if (file.zir.hasCompileErrors()) { + { + comp.mutex.lock(); + defer comp.mutex.unlock(); + try zcu.failed_files.putNoClobber(gpa, file, null); + } + file.status = .astgen_failure; + return error.AnalysisFail; + } + + if (file.prev_zir) |prev_zir| { + try pt.updateZirRefs(file, file_index, prev_zir.*); + // No need to keep previous ZIR. + prev_zir.deinit(gpa); + gpa.destroy(prev_zir); + file.prev_zir = null; + } + + if (opt_root_decl.unwrap()) |root_decl| { + // The root of this file must be re-analyzed, since the file has changed. 
+ comp.mutex.lock(); + defer comp.mutex.unlock(); + + log.debug("outdated root Decl: {}", .{root_decl}); + try zcu.outdated_file_root.put(gpa, root_decl, {}); + } +} + +/// This is called from the AstGen thread pool, so must acquire +/// the Compilation mutex when acting on shared state. +fn updateZirRefs(pt: Zcu.PerThread, file: *Zcu.File, file_index: Zcu.File.Index, old_zir: Zir) !void { + const zcu = pt.zcu; + const gpa = zcu.gpa; + const new_zir = file.zir; + + var inst_map: std.AutoHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index) = .{}; + defer inst_map.deinit(gpa); + + try Zcu.mapOldZirToNew(gpa, old_zir, new_zir, &inst_map); + + const old_tag = old_zir.instructions.items(.tag); + const old_data = old_zir.instructions.items(.data); + + // TODO: this should be done after all AstGen workers complete, to avoid + // iterating over this full set for every updated file. + for (zcu.intern_pool.tracked_insts.keys(), 0..) |*ti, idx_raw| { + const ti_idx: InternPool.TrackedInst.Index = @enumFromInt(idx_raw); + if (ti.file != file_index) continue; + const old_inst = ti.inst; + ti.inst = inst_map.get(ti.inst) orelse { + // Tracking failed for this instruction. Invalidate associated `src_hash` deps. + zcu.comp.mutex.lock(); + defer zcu.comp.mutex.unlock(); + log.debug("tracking failed for %{d}", .{old_inst}); + try zcu.markDependeeOutdated(.{ .src_hash = ti_idx }); + continue; + }; + + if (old_zir.getAssociatedSrcHash(old_inst)) |old_hash| hash_changed: { + if (new_zir.getAssociatedSrcHash(ti.inst)) |new_hash| { + if (std.zig.srcHashEql(old_hash, new_hash)) { + break :hash_changed; + } + log.debug("hash for (%{d} -> %{d}) changed: {} -> {}", .{ + old_inst, + ti.inst, + std.fmt.fmtSliceHexLower(&old_hash), + std.fmt.fmtSliceHexLower(&new_hash), + }); + } + // The source hash associated with this instruction changed - invalidate relevant dependencies. + zcu.comp.mutex.lock(); + defer zcu.comp.mutex.unlock(); + try zcu.markDependeeOutdated(.{ .src_hash = ti_idx }); + } + + // If this is a `struct_decl` etc, we must invalidate any outdated namespace dependencies. 
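
The remainder of this loop diffs the declaration names of the old and new namespace and invalidates the namespace_name dependencies of anything added or removed, plus the namespace itself if anything changed. The intended effect, conceptually, is a two-way set difference; in this condensed sketch the hypothetical helpers oldDeclNames, newDeclNames, and markOutdated stand in for the ZIR declaration iteration and the mutex-guarded zcu.markDependeeOutdated calls:

    var old_names: std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void) = .{};
    defer old_names.deinit(gpa);
    for (oldDeclNames()) |name| try old_names.put(gpa, name, {});

    var any_change = false;
    for (newDeclNames()) |name| {
        if (old_names.swapRemove(name)) continue; // present in both: unchanged
        any_change = true; // name added
        try markOutdated(.{ .namespace_name = .{ .namespace = ti_idx, .name = name } });
    }
    for (old_names.keys()) |name| { // anything left over was removed
        any_change = true;
        try markOutdated(.{ .namespace_name = .{ .namespace = ti_idx, .name = name } });
    }
    if (any_change) try markOutdated(.{ .namespace = ti_idx });
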
+        const has_namespace = switch (old_tag[@intFromEnum(old_inst)]) {
+            .extended => switch (old_data[@intFromEnum(old_inst)].extended.opcode) {
+                .struct_decl, .union_decl, .opaque_decl, .enum_decl => true,
+                else => false,
+            },
+            else => false,
+        };
+        if (!has_namespace) continue;
+
+        var old_names: std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void) = .{};
+        defer old_names.deinit(zcu.gpa);
+        {
+            var it = old_zir.declIterator(old_inst);
+            while (it.next()) |decl_inst| {
+                const decl_name = old_zir.getDeclaration(decl_inst)[0].name;
+                switch (decl_name) {
+                    .@"comptime", .@"usingnamespace", .unnamed_test, .decltest => continue,
+                    _ => if (decl_name.isNamedTest(old_zir)) continue,
+                }
+                const name_zir = decl_name.toString(old_zir).?;
+                const name_ip = try zcu.intern_pool.getOrPutString(
+                    zcu.gpa,
+                    pt.tid,
+                    old_zir.nullTerminatedString(name_zir),
+                    .no_embedded_nulls,
+                );
+                try old_names.put(zcu.gpa, name_ip, {});
+            }
+        }
+        var any_change = false;
+        {
+            var it = new_zir.declIterator(ti.inst);
+            while (it.next()) |decl_inst| {
+                const decl_name = new_zir.getDeclaration(decl_inst)[0].name;
+                switch (decl_name) {
+                    .@"comptime", .@"usingnamespace", .unnamed_test, .decltest => continue,
+                    _ => if (decl_name.isNamedTest(new_zir)) continue,
+                }
+                const name_zir = decl_name.toString(new_zir).?;
+                const name_ip = try zcu.intern_pool.getOrPutString(
+                    zcu.gpa,
+                    pt.tid,
+                    new_zir.nullTerminatedString(name_zir),
+                    .no_embedded_nulls,
+                );
+                // A name present in both the old and new ZIR is unchanged; drop it from
+                // `old_names` and move on. Anything left in `old_names` after this loop
+                // was removed by the update.
+                if (old_names.swapRemove(name_ip)) continue;
+                // Name added
+                any_change = true;
+                zcu.comp.mutex.lock();
+                defer zcu.comp.mutex.unlock();
+                try zcu.markDependeeOutdated(.{ .namespace_name = .{
+                    .namespace = ti_idx,
+                    .name = name_ip,
+                } });
+            }
+        }
+        // The only elements remaining in `old_names` now are any names which were removed.
+        for (old_names.keys()) |name_ip| {
+            any_change = true;
+            zcu.comp.mutex.lock();
+            defer zcu.comp.mutex.unlock();
+            try zcu.markDependeeOutdated(.{ .namespace_name = .{
+                .namespace = ti_idx,
+                .name = name_ip,
+            } });
+        }
+
+        if (any_change) {
+            zcu.comp.mutex.lock();
+            defer zcu.comp.mutex.unlock();
+            try zcu.markDependeeOutdated(.{ .namespace = ti_idx });
+        }
+    }
+}
+
 /// Like `ensureDeclAnalyzed`, but the Decl is a file's root Decl.
pub fn ensureFileAnalyzed(pt: Zcu.PerThread, file_index: Zcu.File.Index) Zcu.SemaError!void { if (pt.zcu.fileRootDecl(file_index).unwrap()) |existing_root| { @@ -91,7 +496,7 @@ pub fn ensureDeclAnalyzed(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) Zcu.Sem }; } - const decl_prog_node = mod.sema_prog_node.start((try decl.fullyQualifiedName(mod)).toSlice(ip), 0); + const decl_prog_node = mod.sema_prog_node.start((try decl.fullyQualifiedName(pt)).toSlice(ip), 0); defer decl_prog_node.end(); break :blk pt.semaDecl(decl_index) catch |err| switch (err) { @@ -290,7 +695,7 @@ pub fn linkerUpdateFunc(pt: Zcu.PerThread, func_index: InternPool.Index, air: Ai defer liveness.deinit(gpa); if (build_options.enable_debug_extensions and comp.verbose_air) { - const fqn = try decl.fullyQualifiedName(zcu); + const fqn = try decl.fullyQualifiedName(pt); std.debug.print("# Begin Function AIR: {}:\n", .{fqn.fmt(ip)}); @import("../print_air.zig").dump(pt, air, liveness); std.debug.print("# End Function AIR: {}\n\n", .{fqn.fmt(ip)}); @@ -324,7 +729,7 @@ pub fn linkerUpdateFunc(pt: Zcu.PerThread, func_index: InternPool.Index, air: Ai }; } - const codegen_prog_node = zcu.codegen_prog_node.start((try decl.fullyQualifiedName(zcu)).toSlice(ip), 0); + const codegen_prog_node = zcu.codegen_prog_node.start((try decl.fullyQualifiedName(pt)).toSlice(ip), 0); defer codegen_prog_node.end(); if (!air.typesFullyResolved(zcu)) { @@ -434,7 +839,7 @@ fn getFileRootStruct( decl.owns_tv = true; decl.analysis = .complete; - try zcu.scanNamespace(namespace_index, decls, decl); + try pt.scanNamespace(namespace_index, decls, decl); try zcu.comp.work_queue.writeItem(.{ .resolve_type_fully = wip_ty.index }); return wip_ty.finish(ip, decl_index, namespace_index.toOptional()); } @@ -502,7 +907,7 @@ fn semaFileUpdate(pt: Zcu.PerThread, file_index: Zcu.File.Index, type_outdated: const decls = file.zir.bodySlice(extra_index, decls_len); if (!type_outdated) { - try zcu.scanNamespace(decl.src_namespace, decls, decl); + try pt.scanNamespace(decl.src_namespace, decls, decl); } return false; @@ -539,7 +944,7 @@ fn semaFile(pt: Zcu.PerThread, file_index: Zcu.File.Index) Zcu.SemaError!void { zcu.setFileRootDecl(file_index, new_decl_index.toOptional()); zcu.namespacePtr(new_namespace_index).decl_index = new_decl_index; - new_decl.name = try file.fullyQualifiedName(zcu); + new_decl.name = try file.fullyQualifiedName(pt); new_decl.name_fully_qualified = true; new_decl.is_pub = true; new_decl.is_exported = false; @@ -601,9 +1006,9 @@ fn semaDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) !Zcu.SemaDeclResult { } log.debug("semaDecl '{d}'", .{@intFromEnum(decl_index)}); - log.debug("decl name '{}'", .{(try decl.fullyQualifiedName(zcu)).fmt(ip)}); + log.debug("decl name '{}'", .{(try decl.fullyQualifiedName(pt)).fmt(ip)}); defer blk: { - log.debug("finish decl name '{}'", .{(decl.fullyQualifiedName(zcu) catch break :blk).fmt(ip)}); + log.debug("finish decl name '{}'", .{(decl.fullyQualifiedName(pt) catch break :blk).fmt(ip)}); } const old_has_tv = decl.has_tv; @@ -631,7 +1036,7 @@ fn semaDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) !Zcu.SemaDeclResult { const std_file_root_decl_index = zcu.fileRootDecl(std_file_imported.file_index); const std_decl = zcu.declPtr(std_file_root_decl_index.unwrap().?); const std_namespace = std_decl.getInnerNamespace(zcu).?; - const builtin_str = try ip.getOrPutString(gpa, "builtin", .no_embedded_nulls); + const builtin_str = try ip.getOrPutString(gpa, pt.tid, "builtin", .no_embedded_nulls); const builtin_decl = 
zcu.declPtr(std_namespace.decls.getKeyAdapted(builtin_str, Zcu.DeclAdapter{ .zcu = zcu }) orelse break :ip_index .none);
         const builtin_namespace = builtin_decl.getInnerNamespaceIndex(zcu).unwrap() orelse break :ip_index .none;
         if (decl.src_namespace != builtin_namespace) break :ip_index .none;
@@ -802,7 +1207,7 @@ fn semaDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) !Zcu.SemaDeclResult {
         } else if (bytes.len == 0) {
             return sema.fail(&block_scope, section_src, "linksection cannot be empty", .{});
         }
-        break :blk try ip.getOrPutStringOpt(gpa, bytes, .no_embedded_nulls);
+        break :blk try ip.getOrPutStringOpt(gpa, pt.tid, bytes, .no_embedded_nulls);
     };
     decl.@"addrspace" = blk: {
         const addrspace_ctx: Sema.AddressSpaceContext = switch (ip.indexToKey(decl_val.toIntern())) {
@@ -996,7 +1401,7 @@ fn newEmbedFile(
     } });
     const array_val = try pt.intern(.{ .aggregate = .{
         .ty = array_ty,
-        .storage = .{ .bytes = try ip.getOrPutTrailingString(gpa, bytes.len, .maybe_embedded_nulls) },
+        .storage = .{ .bytes = try ip.getOrPutTrailingString(gpa, pt.tid, bytes.len, .maybe_embedded_nulls) },
     } });
     const ptr_ty = (try pt.ptrType(.{
@@ -1018,7 +1423,7 @@ fn newEmbedFile(
     result.* = new_file;
     new_file.* = .{
-        .sub_file_path = try ip.getOrPutString(gpa, sub_file_path, .no_embedded_nulls),
+        .sub_file_path = try ip.getOrPutString(gpa, pt.tid, sub_file_path, .no_embedded_nulls),
         .owner = pkg,
         .stat = stat,
         .val = ptr_val,
@@ -1027,6 +1432,271 @@ fn newEmbedFile(
     return ptr_val;
 }
 
+pub fn scanNamespace(
+    pt: Zcu.PerThread,
+    namespace_index: Zcu.Namespace.Index,
+    decls: []const Zir.Inst.Index,
+    parent_decl: *Zcu.Decl,
+) Allocator.Error!void {
+    const tracy = trace(@src());
+    defer tracy.end();
+
+    const zcu = pt.zcu;
+    const gpa = zcu.gpa;
+    const namespace = zcu.namespacePtr(namespace_index);
+
+    // For incremental updates, `scanDecl` wants to look up existing decls by their ZIR index rather
+    // than their name. We'll build an efficient mapping now, then discard the current `decls`.
+    var existing_by_inst: std.AutoHashMapUnmanaged(InternPool.TrackedInst.Index, Zcu.Decl.Index) = .{};
+    defer existing_by_inst.deinit(gpa);
+
+    try existing_by_inst.ensureTotalCapacity(gpa, @intCast(namespace.decls.count()));
+
+    for (namespace.decls.keys()) |decl_index| {
+        const decl = zcu.declPtr(decl_index);
+        existing_by_inst.putAssumeCapacityNoClobber(decl.zir_decl_index.unwrap().?, decl_index);
+    }
+
+    var seen_decls: std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, void) = .{};
+    defer seen_decls.deinit(gpa);
+
+    try zcu.comp.work_queue.ensureUnusedCapacity(decls.len);
+
+    namespace.decls.clearRetainingCapacity();
+    try namespace.decls.ensureTotalCapacity(gpa, decls.len);
+
+    namespace.usingnamespace_set.clearRetainingCapacity();
+
+    var scan_decl_iter: ScanDeclIter = .{
+        .pt = pt,
+        .namespace_index = namespace_index,
+        .parent_decl = parent_decl,
+        .seen_decls = &seen_decls,
+        .existing_by_inst = &existing_by_inst,
+        .pass = .named,
+    };
+    for (decls) |decl_inst| {
+        try scan_decl_iter.scanDecl(decl_inst);
+    }
+    scan_decl_iter.pass = .unnamed;
+    for (decls) |decl_inst| {
+        try scan_decl_iter.scanDecl(decl_inst);
+    }
+
+    if (seen_decls.count() != namespace.decls.count()) {
+        // Do a pass over the namespace contents and remove any decls from the last update
+        // which were removed in this one.
+        var i: usize = 0;
+        while (i < namespace.decls.count()) {
+            const decl_index = namespace.decls.keys()[i];
+            const decl = zcu.declPtr(decl_index);
+            if (!seen_decls.contains(decl.name)) {
+                // We must preserve namespace ordering for @typeInfo.
+                namespace.decls.orderedRemoveAt(i);
+            } else {
+                i += 1;
+            }
+        }
+    }
+}
+
+const ScanDeclIter = struct {
+    pt: Zcu.PerThread,
+    namespace_index: Zcu.Namespace.Index,
+    parent_decl: *Zcu.Decl,
+    seen_decls: *std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, void),
+    existing_by_inst: *const std.AutoHashMapUnmanaged(InternPool.TrackedInst.Index, Zcu.Decl.Index),
+    /// Decl scanning is run in two passes, so that we can detect when a generated
+    /// name would clash with an explicit name and use a different one.
+    pass: enum { named, unnamed },
+    usingnamespace_index: usize = 0,
+    comptime_index: usize = 0,
+    unnamed_test_index: usize = 0,
+
+    fn avoidNameConflict(iter: *ScanDeclIter, comptime fmt: []const u8, args: anytype) !InternPool.NullTerminatedString {
+        const pt = iter.pt;
+        const gpa = pt.zcu.gpa;
+        const ip = &pt.zcu.intern_pool;
+        var name = try ip.getOrPutStringFmt(gpa, pt.tid, fmt, args, .no_embedded_nulls);
+        var gop = try iter.seen_decls.getOrPut(gpa, name);
+        var next_suffix: u32 = 0;
+        while (gop.found_existing) {
+            name = try ip.getOrPutStringFmt(gpa, pt.tid, "{}_{d}", .{ name.fmt(ip), next_suffix }, .no_embedded_nulls);
+            gop = try iter.seen_decls.getOrPut(gpa, name);
+            next_suffix += 1;
+        }
+        return name;
+    }
+
+    fn scanDecl(iter: *ScanDeclIter, decl_inst: Zir.Inst.Index) Allocator.Error!void {
+        const tracy = trace(@src());
+        defer tracy.end();
+
+        const pt = iter.pt;
+        const zcu = pt.zcu;
+        const namespace_index = iter.namespace_index;
+        const namespace = zcu.namespacePtr(namespace_index);
+        const gpa = zcu.gpa;
+        const zir = namespace.fileScope(zcu).zir;
+        const ip = &zcu.intern_pool;
+
+        const inst_data = zir.instructions.items(.data)[@intFromEnum(decl_inst)].declaration;
+        const extra = zir.extraData(Zir.Inst.Declaration, inst_data.payload_index);
+        const declaration = extra.data;
+
+        // Every Decl needs a name.
+        const decl_name: InternPool.NullTerminatedString, const kind: Zcu.Decl.Kind, const is_named_test: bool = switch (declaration.name) {
+            .@"comptime" => info: {
+                if (iter.pass != .unnamed) return;
+                const i = iter.comptime_index;
+                iter.comptime_index += 1;
+                break :info .{
+                    try iter.avoidNameConflict("comptime_{d}", .{i}),
+                    .@"comptime",
+                    false,
+                };
+            },
+            .@"usingnamespace" => info: {
+                // TODO: this isn't right! These should be considered unnamed. Name conflicts can happen here.
+                // The problem is, we need to preserve the decl ordering for `@typeInfo`.
+                // I'm not bothering to fix this now, since some upcoming changes will change this code significantly anyway.
+                if (iter.pass != .named) return;
+                const i = iter.usingnamespace_index;
+                iter.usingnamespace_index += 1;
+                break :info .{
+                    try iter.avoidNameConflict("usingnamespace_{d}", .{i}),
+                    .@"usingnamespace",
+                    false,
+                };
+            },
+            .unnamed_test => info: {
+                if (iter.pass != .unnamed) return;
+                const i = iter.unnamed_test_index;
+                iter.unnamed_test_index += 1;
+                break :info .{
+                    try iter.avoidNameConflict("test_{d}", .{i}),
+                    .@"test",
+                    false,
+                };
+            },
+            .decltest => info: {
+                // We consider these to be unnamed since the decl name can be adjusted to avoid conflicts if necessary.
+                if (iter.pass != .unnamed) return;
+                assert(declaration.flags.has_doc_comment);
+                const name = zir.nullTerminatedString(@enumFromInt(zir.extra[extra.end]));
+                break :info .{
+                    try iter.avoidNameConflict("decltest.{s}", .{name}),
+                    .@"test",
+                    true,
+                };
+            },
+            _ => if (declaration.name.isNamedTest(zir)) info: {
+                // We consider these to be unnamed since the decl name can be adjusted to avoid conflicts if necessary.
+                if (iter.pass != .unnamed) return;
+                break :info .{
+                    try iter.avoidNameConflict("test.{s}", .{zir.nullTerminatedString(declaration.name.toString(zir).?)}),
+                    .@"test",
+                    true,
+                };
+            } else info: {
+                if (iter.pass != .named) return;
+                const name = try ip.getOrPutString(
+                    gpa,
+                    pt.tid,
+                    zir.nullTerminatedString(declaration.name.toString(zir).?),
+                    .no_embedded_nulls,
+                );
+                try iter.seen_decls.putNoClobber(gpa, name, {});
+                break :info .{
+                    name,
+                    .named,
+                    false,
+                };
+            },
+        };
+
+        switch (kind) {
+            .@"usingnamespace" => try namespace.usingnamespace_set.ensureUnusedCapacity(gpa, 1),
+            .@"test" => try zcu.test_functions.ensureUnusedCapacity(gpa, 1),
+            else => {},
+        }
+
+        const parent_file_scope_index = iter.parent_decl.getFileScopeIndex(zcu);
+        const tracked_inst = try ip.trackZir(gpa, parent_file_scope_index, decl_inst);
+
+        // We create a Decl for it regardless of analysis status.
+
+        const prev_exported, const decl_index = if (iter.existing_by_inst.get(tracked_inst)) |decl_index| decl_index: {
+            // We need only update this existing Decl.
+            const decl = zcu.declPtr(decl_index);
+            const was_exported = decl.is_exported;
+            assert(decl.kind == kind); // ZIR tracking should preserve this
+            decl.name = decl_name;
+            decl.is_pub = declaration.flags.is_pub;
+            decl.is_exported = declaration.flags.is_export;
+            break :decl_index .{ was_exported, decl_index };
+        } else decl_index: {
+            // Create and set up a new Decl.
+            const new_decl_index = try zcu.allocateNewDecl(namespace_index);
+            const new_decl = zcu.declPtr(new_decl_index);
+            new_decl.kind = kind;
+            new_decl.name = decl_name;
+            new_decl.is_pub = declaration.flags.is_pub;
+            new_decl.is_exported = declaration.flags.is_export;
+            new_decl.zir_decl_index = tracked_inst.toOptional();
+            break :decl_index .{ false, new_decl_index };
+        };
+
+        const decl = zcu.declPtr(decl_index);
+
+        namespace.decls.putAssumeCapacityNoClobberContext(decl_index, {}, .{ .zcu = zcu });
+
+        const comp = zcu.comp;
+        const decl_mod = namespace.fileScope(zcu).mod;
+        const want_analysis = declaration.flags.is_export or switch (kind) {
+            .anon => unreachable,
+            .@"comptime" => true,
+            .@"usingnamespace" => a: {
+                namespace.usingnamespace_set.putAssumeCapacityNoClobber(decl_index, declaration.flags.is_pub);
+                break :a true;
+            },
+            .named => false,
+            .@"test" => a: {
+                if (!comp.config.is_test) break :a false;
+                if (decl_mod != zcu.main_mod) break :a false;
+                if (is_named_test and comp.test_filters.len > 0) {
+                    const decl_fqn = try namespace.fullyQualifiedName(pt, decl_name);
+                    const decl_fqn_slice = decl_fqn.toSlice(ip);
+                    for (comp.test_filters) |test_filter| {
+                        if (std.mem.indexOf(u8, decl_fqn_slice, test_filter)) |_| break;
+                    } else break :a false;
+                }
+                zcu.test_functions.putAssumeCapacity(decl_index, {}); // may clobber on incremental update
+                break :a true;
+            },
+        };
+
+        if (want_analysis) {
+            // We will not queue analysis if the decl has been analyzed on a previous update and
+            // `is_export` is unchanged. In this case, the incremental update mechanism will handle
+            // re-analysis for us if necessary.
+            if (prev_exported != declaration.flags.is_export or decl.analysis == .unreferenced) {
+                log.debug("scanDecl queue analyze_decl file='{s}' decl_name='{}' decl_index={d}", .{
+                    namespace.fileScope(zcu).sub_file_path, decl_name.fmt(ip), decl_index,
+                });
+                comp.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = decl_index });
+            }
+        }
+
+        if (decl.getOwnedFunction(zcu) != null) {
+            // TODO this logic is insufficient; namespaces we don't re-scan may still require
+            // updated line numbers. Look into this!
+            // TODO Look into detecting when this would be unnecessary by storing enough state
+            // in `Decl` to notice that the line number did not change.
+            comp.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl_index });
+        }
+    }
+};
+
 pub fn analyzeFnBody(pt: Zcu.PerThread, func_index: InternPool.Index, arena: Allocator) Zcu.SemaError!Air {
     const tracy = trace(@src());
     defer tracy.end();
@@ -1038,12 +1708,12 @@ pub fn analyzeFnBody(pt: Zcu.PerThread, func_index: InternPool.Index, arena: All
     const decl_index = func.owner_decl;
     const decl = mod.declPtr(decl_index);
 
-    log.debug("func name '{}'", .{(try decl.fullyQualifiedName(mod)).fmt(ip)});
+    log.debug("func name '{}'", .{(try decl.fullyQualifiedName(pt)).fmt(ip)});
     defer blk: {
-        log.debug("finish func name '{}'", .{(decl.fullyQualifiedName(mod) catch break :blk).fmt(ip)});
+        log.debug("finish func name '{}'", .{(decl.fullyQualifiedName(pt) catch break :blk).fmt(ip)});
     }
 
-    const decl_prog_node = mod.sema_prog_node.start((try decl.fullyQualifiedName(mod)).toSlice(ip), 0);
+    const decl_prog_node = mod.sema_prog_node.start((try decl.fullyQualifiedName(pt)).toSlice(ip), 0);
     defer decl_prog_node.end();
 
     mod.intern_pool.removeDependenciesForDepender(gpa, InternPool.AnalUnit.wrap(.{ .func = func_index }));
@@ -1273,6 +1943,19 @@ pub fn analyzeFnBody(pt: Zcu.PerThread, func_index: InternPool.Index, arena: All
     };
 }
 
+fn lockAndClearFileCompileError(pt: Zcu.PerThread, file: *Zcu.File) void {
+    switch (file.status) {
+        .success_zir, .retryable_failure => {},
+        .never_loaded, .parse_failure, .astgen_failure => {
+            pt.zcu.comp.mutex.lock();
+            defer pt.zcu.comp.mutex.unlock();
+            if (pt.zcu.failed_files.fetchSwapRemove(file)) |kv| {
+                if (kv.value) |msg| msg.destroy(pt.zcu.gpa); // Delete previous error message.
+            }
+        },
+    }
+}
+
 /// Called from `Compilation.update`, after everything is done, just before
 /// reporting compile errors. In this function we emit exported symbol collision
 /// errors and communicate exported symbols to the linker backend.
@@ -1397,7 +2080,7 @@ pub fn populateTestFunctions( const root_decl_index = zcu.fileRootDecl(builtin_file_index); const root_decl = zcu.declPtr(root_decl_index.unwrap().?); const builtin_namespace = zcu.namespacePtr(root_decl.src_namespace); - const test_functions_str = try ip.getOrPutString(gpa, "test_functions", .no_embedded_nulls); + const test_functions_str = try ip.getOrPutString(gpa, pt.tid, "test_functions", .no_embedded_nulls); const decl_index = builtin_namespace.decls.getKeyAdapted( test_functions_str, Zcu.DeclAdapter{ .zcu = zcu }, @@ -1424,7 +2107,7 @@ pub fn populateTestFunctions( for (test_fn_vals, zcu.test_functions.keys()) |*test_fn_val, test_decl_index| { const test_decl = zcu.declPtr(test_decl_index); - const test_decl_name = try test_decl.fullyQualifiedName(zcu); + const test_decl_name = try test_decl.fullyQualifiedName(pt); const test_decl_name_len = test_decl_name.length(ip); const test_name_anon_decl: InternPool.Key.Ptr.BaseAddr.AnonDecl = n: { const test_name_ty = try pt.arrayType(.{ @@ -1530,7 +2213,7 @@ pub fn linkerUpdateDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) !void { const decl = zcu.declPtr(decl_index); - const codegen_prog_node = zcu.codegen_prog_node.start((try decl.fullyQualifiedName(zcu)).toSlice(&zcu.intern_pool), 0); + const codegen_prog_node = zcu.codegen_prog_node.start((try decl.fullyQualifiedName(pt)).toSlice(&zcu.intern_pool), 0); defer codegen_prog_node.end(); if (comp.bin_file) |lf| { @@ -2064,11 +2747,11 @@ pub fn getBuiltinDecl(pt: Zcu.PerThread, name: []const u8) Allocator.Error!Inter const std_file_imported = zcu.importPkg(zcu.std_mod) catch @panic("failed to import lib/std.zig"); const std_file_root_decl = zcu.fileRootDecl(std_file_imported.file_index).unwrap().?; const std_namespace = zcu.declPtr(std_file_root_decl).getOwnedInnerNamespace(zcu).?; - const builtin_str = try ip.getOrPutString(gpa, "builtin", .no_embedded_nulls); + const builtin_str = try ip.getOrPutString(gpa, pt.tid, "builtin", .no_embedded_nulls); const builtin_decl = std_namespace.decls.getKeyAdapted(builtin_str, Zcu.DeclAdapter{ .zcu = zcu }) orelse @panic("lib/std.zig is corrupt and missing 'builtin'"); pt.ensureDeclAnalyzed(builtin_decl) catch @panic("std.builtin is corrupt"); const builtin_namespace = zcu.declPtr(builtin_decl).getInnerNamespace(zcu) orelse @panic("std.builtin is corrupt"); - const name_str = try ip.getOrPutString(gpa, name, .no_embedded_nulls); + const name_str = try ip.getOrPutString(gpa, pt.tid, name, .no_embedded_nulls); return builtin_namespace.decls.getKeyAdapted(name_str, Zcu.DeclAdapter{ .zcu = zcu }) orelse @panic("lib/std/builtin.zig is corrupt"); } @@ -2082,6 +2765,8 @@ pub fn getBuiltinType(pt: Zcu.PerThread, name: []const u8) Allocator.Error!Type const Air = @import("../Air.zig"); const Allocator = std.mem.Allocator; const assert = std.debug.assert; +const Ast = std.zig.Ast; +const AstGen = std.zig.AstGen; const BigIntConst = std.math.big.int.Const; const BigIntMutable = std.math.big.int.Mutable; const build_options = @import("build_options"); diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index a8ac674e07..8873c5cb1b 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -2204,14 +2204,14 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif const func_val = (try func.air.value(pl_op.operand, pt)) orelse break :blk null; if (func_val.getFunction(mod)) |function| { - _ = try func.bin_file.getOrCreateAtomForDecl(function.owner_decl); + _ = try 
func.bin_file.getOrCreateAtomForDecl(pt, function.owner_decl); break :blk function.owner_decl; } else if (func_val.getExternFunc(mod)) |extern_func| { const ext_decl = mod.declPtr(extern_func.decl); const ext_info = mod.typeToFunc(ext_decl.typeOf(mod)).?; var func_type = try genFunctype(func.gpa, ext_info.cc, ext_info.param_types.get(ip), Type.fromInterned(ext_info.return_type), pt); defer func_type.deinit(func.gpa); - const atom_index = try func.bin_file.getOrCreateAtomForDecl(extern_func.decl); + const atom_index = try func.bin_file.getOrCreateAtomForDecl(pt, extern_func.decl); const atom = func.bin_file.getAtomPtr(atom_index); const type_index = try func.bin_file.storeDeclType(extern_func.decl, func_type); try func.bin_file.addOrUpdateImport( @@ -2224,7 +2224,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif } else switch (mod.intern_pool.indexToKey(func_val.ip_index)) { .ptr => |ptr| if (ptr.byte_offset == 0) switch (ptr.base_addr) { .decl => |decl| { - _ = try func.bin_file.getOrCreateAtomForDecl(decl); + _ = try func.bin_file.getOrCreateAtomForDecl(pt, decl); break :blk decl; }, else => {}, @@ -3227,7 +3227,7 @@ fn lowerDeclRefValue(func: *CodeGen, decl_index: InternPool.DeclIndex, offset: u return WValue{ .imm32 = 0xaaaaaaaa }; } - const atom_index = try func.bin_file.getOrCreateAtomForDecl(decl_index); + const atom_index = try func.bin_file.getOrCreateAtomForDecl(pt, decl_index); const atom = func.bin_file.getAtom(atom_index); const target_sym_index = @intFromEnum(atom.sym_index); @@ -7284,7 +7284,7 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 { defer arena_allocator.deinit(); const arena = arena_allocator.allocator(); - const fqn = try mod.declPtr(enum_decl_index).fullyQualifiedName(mod); + const fqn = try mod.declPtr(enum_decl_index).fullyQualifiedName(pt); const func_name = try std.fmt.allocPrintZ(arena, "__zig_tag_name_{}", .{fqn.fmt(ip)}); // check if we already generated code for this. 
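The backend hunks above all follow the same mechanical convention this commit establishes: a function that used to take a `*Zcu` (or look one up from global state) now receives the caller's `Zcu.PerThread` and derives everything else from it. A minimal sketch of that convention, assuming a hypothetical helper (`emitDeclName` is illustrative only, not a function added by this patch):

    // Sketch of the PerThread calling convention; `emitDeclName` is hypothetical.
    fn emitDeclName(pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !void {
        const zcu = pt.zcu; // shared, cross-thread compilation state
        const ip = &zcu.intern_pool;
        const decl = zcu.declPtr(decl_index);
        // `fullyQualifiedName` now takes the full `pt` rather than `*Zcu`, since it
        // may intern a new string, and interning needs the thread id (`pt.tid`).
        const fqn = try decl.fullyQualifiedName(pt);
        log.debug("emitting {}", .{fqn.fmt(ip)});
    }
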
diff --git a/src/codegen.zig b/src/codegen.zig index 5fc8ef174f..0513682d73 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -756,7 +756,7 @@ fn lowerDeclRef( return Result.ok; } - const vaddr = try lf.getDeclVAddr(decl_index, .{ + const vaddr = try lf.getDeclVAddr(pt, decl_index, .{ .parent_atom_index = reloc_info.parent_atom_index, .offset = code.items.len, .addend = @intCast(offset), diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index ca574070bf..0f13c9fd9b 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1744,7 +1744,7 @@ pub const Object = struct { if (export_indices.len != 0) { return updateExportedGlobal(self, zcu, global_index, export_indices); } else { - const fqn = try self.builder.strtabString((try decl.fullyQualifiedName(zcu)).toSlice(ip)); + const fqn = try self.builder.strtabString((try decl.fullyQualifiedName(pt)).toSlice(ip)); try global_index.rename(fqn, &self.builder); global_index.setLinkage(.internal, &self.builder); if (comp.config.dll_export_fns) @@ -2520,7 +2520,7 @@ pub const Object = struct { const field_offset = ty.structFieldOffset(field_index, pt); const field_name = struct_type.fieldName(ip, field_index).unwrap() orelse - try ip.getOrPutStringFmt(gpa, "{d}", .{field_index}, .no_embedded_nulls); + try ip.getOrPutStringFmt(gpa, pt.tid, "{d}", .{field_index}, .no_embedded_nulls); fields.appendAssumeCapacity(try o.builder.debugMemberType( try o.builder.metadataString(field_name.toSlice(ip)), @@ -2807,17 +2807,18 @@ pub const Object = struct { } fn getStackTraceType(o: *Object) Allocator.Error!Type { - const zcu = o.pt.zcu; + const pt = o.pt; + const zcu = pt.zcu; const std_mod = zcu.std_mod; const std_file_imported = zcu.importPkg(std_mod) catch unreachable; - const builtin_str = try zcu.intern_pool.getOrPutString(zcu.gpa, "builtin", .no_embedded_nulls); + const builtin_str = try zcu.intern_pool.getOrPutString(zcu.gpa, pt.tid, "builtin", .no_embedded_nulls); const std_file_root_decl = zcu.fileRootDecl(std_file_imported.file_index); const std_namespace = zcu.namespacePtr(zcu.declPtr(std_file_root_decl.unwrap().?).src_namespace); const builtin_decl = std_namespace.decls.getKeyAdapted(builtin_str, Zcu.DeclAdapter{ .zcu = zcu }).?; - const stack_trace_str = try zcu.intern_pool.getOrPutString(zcu.gpa, "StackTrace", .no_embedded_nulls); + const stack_trace_str = try zcu.intern_pool.getOrPutString(zcu.gpa, pt.tid, "StackTrace", .no_embedded_nulls); // buffer is only used for int_type, `builtin` is a struct. 
const builtin_ty = zcu.declPtr(builtin_decl).val.toType(); const builtin_namespace = zcu.namespacePtrUnwrap(builtin_ty.getNamespaceIndex(zcu)).?; @@ -2865,7 +2866,7 @@ pub const Object = struct { try o.builder.strtabString((if (is_extern) decl.name else - try decl.fullyQualifiedName(zcu)).toSlice(ip)), + try decl.fullyQualifiedName(pt)).toSlice(ip)), toLlvmAddressSpace(decl.@"addrspace", target), ); gop.value_ptr.* = function_index.ptrConst(&o.builder).global; @@ -3074,7 +3075,8 @@ pub const Object = struct { if (gop.found_existing) return gop.value_ptr.ptr(&o.builder).kind.variable; errdefer assert(o.decl_map.remove(decl_index)); - const zcu = o.pt.zcu; + const pt = o.pt; + const zcu = pt.zcu; const decl = zcu.declPtr(decl_index); const is_extern = decl.isExtern(zcu); @@ -3082,7 +3084,7 @@ pub const Object = struct { try o.builder.strtabString((if (is_extern) decl.name else - try decl.fullyQualifiedName(zcu)).toSlice(&zcu.intern_pool)), + try decl.fullyQualifiedName(pt)).toSlice(&zcu.intern_pool)), try o.lowerType(decl.typeOf(zcu)), toLlvmGlobalAddressSpace(decl.@"addrspace", zcu.getTarget()), ); @@ -3310,7 +3312,7 @@ pub const Object = struct { return int_ty; } - const fqn = try mod.declPtr(struct_type.decl.unwrap().?).fullyQualifiedName(mod); + const fqn = try mod.declPtr(struct_type.decl.unwrap().?).fullyQualifiedName(pt); var llvm_field_types = std.ArrayListUnmanaged(Builder.Type){}; defer llvm_field_types.deinit(o.gpa); @@ -3464,7 +3466,7 @@ pub const Object = struct { return enum_tag_ty; } - const fqn = try mod.declPtr(union_obj.decl).fullyQualifiedName(mod); + const fqn = try mod.declPtr(union_obj.decl).fullyQualifiedName(pt); const aligned_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[layout.most_aligned_field]); const aligned_field_llvm_ty = try o.lowerType(aligned_field_ty); @@ -3525,7 +3527,7 @@ pub const Object = struct { const gop = try o.type_map.getOrPut(o.gpa, t.toIntern()); if (!gop.found_existing) { const decl = mod.declPtr(ip.loadOpaqueType(t.toIntern()).decl); - const fqn = try decl.fullyQualifiedName(mod); + const fqn = try decl.fullyQualifiedName(pt); gop.value_ptr.* = try o.builder.opaqueType(try o.builder.string(fqn.toSlice(ip))); } return gop.value_ptr.*; @@ -4585,7 +4587,7 @@ pub const Object = struct { const usize_ty = try o.lowerType(Type.usize); const ret_ty = try o.lowerType(Type.slice_const_u8_sentinel_0); - const fqn = try zcu.declPtr(enum_type.decl).fullyQualifiedName(zcu); + const fqn = try zcu.declPtr(enum_type.decl).fullyQualifiedName(pt); const target = zcu.root_mod.resolved_target.result; const function_index = try o.builder.addFunction( try o.builder.fnType(ret_ty, &.{try o.lowerType(Type.fromInterned(enum_type.tag_ty))}, .normal), @@ -5173,7 +5175,7 @@ pub const FuncGen = struct { const line_number = decl.navSrcLine(zcu) + 1; self.inlined = self.wip.debug_location; - const fqn = try decl.fullyQualifiedName(zcu); + const fqn = try decl.fullyQualifiedName(pt); const fn_ty = try pt.funcType(.{ .param_types = &.{}, @@ -9707,7 +9709,7 @@ pub const FuncGen = struct { if (gop.found_existing) return gop.value_ptr.*; errdefer assert(o.named_enum_map.remove(enum_type.decl)); - const fqn = try zcu.declPtr(enum_type.decl).fullyQualifiedName(zcu); + const fqn = try zcu.declPtr(enum_type.decl).fullyQualifiedName(pt); const target = zcu.root_mod.resolved_target.result; const function_index = try o.builder.addFunction( try o.builder.fnType(.i1, &.{try o.lowerType(Type.fromInterned(enum_type.tag_ty))}, .normal), diff --git a/src/codegen/spirv.zig 
b/src/codegen/spirv.zig index 95874a5d65..92cff8b2d0 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -1753,7 +1753,7 @@ const DeclGen = struct { } const field_name = struct_type.fieldName(ip, field_index).unwrap() orelse - try ip.getOrPutStringFmt(mod.gpa, "{d}", .{field_index}, .no_embedded_nulls); + try ip.getOrPutStringFmt(mod.gpa, pt.tid, "{d}", .{field_index}, .no_embedded_nulls); try member_types.append(try self.resolveType(field_ty, .indirect)); try member_names.append(field_name.toSlice(ip)); } @@ -3012,7 +3012,7 @@ const DeclGen = struct { // Append the actual code into the functions section. try self.spv.addFunction(spv_decl_index, self.func); - const fqn = try decl.fullyQualifiedName(self.pt.zcu); + const fqn = try decl.fullyQualifiedName(self.pt); try self.spv.debugName(result_id, fqn.toSlice(ip)); // Temporarily generate a test kernel declaration if this is a test function. @@ -3041,7 +3041,7 @@ const DeclGen = struct { .storage_class = final_storage_class, }); - const fqn = try decl.fullyQualifiedName(self.pt.zcu); + const fqn = try decl.fullyQualifiedName(self.pt); try self.spv.debugName(result_id, fqn.toSlice(ip)); try self.spv.declareDeclDeps(spv_decl_index, &.{}); }, @@ -3086,7 +3086,7 @@ const DeclGen = struct { try self.func.body.emit(self.spv.gpa, .OpFunctionEnd, {}); try self.spv.addFunction(spv_decl_index, self.func); - const fqn = try decl.fullyQualifiedName(self.pt.zcu); + const fqn = try decl.fullyQualifiedName(self.pt); try self.spv.debugNameFmt(initializer_id, "initializer of {}", .{fqn.fmt(ip)}); try self.spv.sections.types_globals_constants.emit(self.spv.gpa, .OpExtInst, .{ diff --git a/src/link.zig b/src/link.zig index db19a16d4d..f407ad2f4c 100644 --- a/src/link.zig +++ b/src/link.zig @@ -424,14 +424,14 @@ pub const File = struct { } } - pub fn updateDeclLineNumber(base: *File, module: *Zcu, decl_index: InternPool.DeclIndex) UpdateDeclError!void { - const decl = module.declPtr(decl_index); + pub fn updateDeclLineNumber(base: *File, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) UpdateDeclError!void { + const decl = pt.zcu.declPtr(decl_index); assert(decl.has_tv); switch (base.tag) { .spirv, .nvptx => {}, inline else => |tag| { if (tag != .c and build_options.only_c) unreachable; - return @as(*tag.Type(), @fieldParentPtr("base", base)).updateDeclLineNumber(module, decl_index); + return @as(*tag.Type(), @fieldParentPtr("base", base)).updateDeclLineNumber(pt, decl_index); }, } } @@ -626,14 +626,14 @@ pub const File = struct { /// `Decl`'s address was not yet resolved, or the containing atom gets moved in virtual memory. /// May be called before or after updateFunc/updateDecl therefore it is up to the linker to allocate /// the block/atom. 
- pub fn getDeclVAddr(base: *File, decl_index: InternPool.DeclIndex, reloc_info: RelocInfo) !u64 { + pub fn getDeclVAddr(base: *File, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex, reloc_info: RelocInfo) !u64 { if (build_options.only_c) @compileError("unreachable"); switch (base.tag) { .c => unreachable, .spirv => unreachable, .nvptx => unreachable, inline else => |tag| { - return @as(*tag.Type(), @fieldParentPtr("base", base)).getDeclVAddr(decl_index, reloc_info); + return @as(*tag.Type(), @fieldParentPtr("base", base)).getDeclVAddr(pt, decl_index, reloc_info); }, } } diff --git a/src/link/C.zig b/src/link/C.zig index 3db5952a4c..1a6cee068e 100644 --- a/src/link/C.zig +++ b/src/link/C.zig @@ -383,11 +383,11 @@ pub fn updateDecl(self: *C, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) gop.value_ptr.fwd_decl = try self.addString(object.dg.fwd_decl.items); } -pub fn updateDeclLineNumber(self: *C, zcu: *Zcu, decl_index: InternPool.DeclIndex) !void { +pub fn updateDeclLineNumber(self: *C, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !void { // The C backend does not have the ability to fix line numbers without re-generating // the entire Decl. _ = self; - _ = zcu; + _ = pt; _ = decl_index; } diff --git a/src/link/Coff.zig b/src/link/Coff.zig index 7ef5bde6e6..bd1c96bf8b 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -1176,7 +1176,7 @@ pub fn lowerUnnamedConst(self: *Coff, pt: Zcu.PerThread, val: Value, decl_index: gop.value_ptr.* = .{}; } const unnamed_consts = gop.value_ptr; - const decl_name = try decl.fullyQualifiedName(mod); + const decl_name = try decl.fullyQualifiedName(pt); const index = unnamed_consts.items.len; const sym_name = try std.fmt.allocPrint(gpa, "__unnamed_{}_{d}", .{ decl_name.fmt(&mod.intern_pool), index }); defer gpa.free(sym_name); @@ -1427,7 +1427,7 @@ fn updateDeclCode(self: *Coff, pt: Zcu.PerThread, decl_index: InternPool.DeclInd const mod = pt.zcu; const decl = mod.declPtr(decl_index); - const decl_name = try decl.fullyQualifiedName(mod); + const decl_name = try decl.fullyQualifiedName(pt); log.debug("updateDeclCode {}{*}", .{ decl_name.fmt(&mod.intern_pool), decl }); const required_alignment: u32 = @intCast(decl.getAlignment(pt).toByteUnits() orelse 0); @@ -1855,7 +1855,7 @@ pub fn flushModule(self: *Coff, arena: Allocator, tid: Zcu.PerThread.Id, prog_no assert(!self.imports_count_dirty); } -pub fn getDeclVAddr(self: *Coff, decl_index: InternPool.DeclIndex, reloc_info: link.File.RelocInfo) !u64 { +pub fn getDeclVAddr(self: *Coff, _: Zcu.PerThread, decl_index: InternPool.DeclIndex, reloc_info: link.File.RelocInfo) !u64 { assert(self.llvm_object == null); const this_atom_index = try self.getOrCreateAtomForDecl(decl_index); @@ -1972,9 +1972,9 @@ pub fn getGlobalSymbol(self: *Coff, name: []const u8, lib_name_name: ?[]const u8 return global_index; } -pub fn updateDeclLineNumber(self: *Coff, module: *Module, decl_index: InternPool.DeclIndex) !void { +pub fn updateDeclLineNumber(self: *Coff, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !void { _ = self; - _ = module; + _ = pt; _ = decl_index; log.debug("TODO implement updateDeclLineNumber", .{}); } diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index 80c88666bc..9ae4ee3be6 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -1082,7 +1082,7 @@ pub fn initDeclState(self: *Dwarf, pt: Zcu.PerThread, decl_index: InternPool.Dec defer tracy.end(); const decl = pt.zcu.declPtr(decl_index); - const decl_linkage_name = try decl.fullyQualifiedName(pt.zcu); + const decl_linkage_name 
= try decl.fullyQualifiedName(pt); log.debug("initDeclState {}{*}", .{ decl_linkage_name.fmt(&pt.zcu.intern_pool), decl }); diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 39704d937c..579df0760a 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -543,7 +543,7 @@ pub fn deinit(self: *Elf) void { self.comdat_group_sections.deinit(gpa); } -pub fn getDeclVAddr(self: *Elf, decl_index: InternPool.DeclIndex, reloc_info: link.File.RelocInfo) !u64 { +pub fn getDeclVAddr(self: *Elf, _: Zcu.PerThread, decl_index: InternPool.DeclIndex, reloc_info: link.File.RelocInfo) !u64 { assert(self.llvm_object == null); return self.zigObjectPtr().?.getDeclVAddr(self, decl_index, reloc_info); } @@ -3021,9 +3021,9 @@ pub fn updateExports( return self.zigObjectPtr().?.updateExports(self, pt, exported, export_indices); } -pub fn updateDeclLineNumber(self: *Elf, mod: *Module, decl_index: InternPool.DeclIndex) !void { +pub fn updateDeclLineNumber(self: *Elf, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !void { if (self.llvm_object) |_| return; - return self.zigObjectPtr().?.updateDeclLineNumber(mod, decl_index); + return self.zigObjectPtr().?.updateDeclLineNumber(pt, decl_index); } pub fn deleteExport( diff --git a/src/link/Elf/ZigObject.zig b/src/link/Elf/ZigObject.zig index 8cfa5e701f..7a419750d4 100644 --- a/src/link/Elf/ZigObject.zig +++ b/src/link/Elf/ZigObject.zig @@ -908,7 +908,7 @@ fn updateDeclCode( const gpa = elf_file.base.comp.gpa; const mod = pt.zcu; const decl = mod.declPtr(decl_index); - const decl_name = try decl.fullyQualifiedName(mod); + const decl_name = try decl.fullyQualifiedName(pt); log.debug("updateDeclCode {}{*}", .{ decl_name.fmt(&mod.intern_pool), decl }); @@ -1009,7 +1009,7 @@ fn updateTlv( const mod = pt.zcu; const gpa = mod.gpa; const decl = mod.declPtr(decl_index); - const decl_name = try decl.fullyQualifiedName(mod); + const decl_name = try decl.fullyQualifiedName(pt); log.debug("updateTlv {} ({*})", .{ decl_name.fmt(&mod.intern_pool), decl }); @@ -1286,7 +1286,7 @@ pub fn lowerUnnamedConst( } const unnamed_consts = gop.value_ptr; const decl = mod.declPtr(decl_index); - const decl_name = try decl.fullyQualifiedName(mod); + const decl_name = try decl.fullyQualifiedName(pt); const index = unnamed_consts.items.len; const name = try std.fmt.allocPrint(gpa, "__unnamed_{}_{d}", .{ decl_name.fmt(&mod.intern_pool), index }); defer gpa.free(name); @@ -1466,19 +1466,19 @@ pub fn updateExports( /// Must be called only after a successful call to `updateDecl`. 
pub fn updateDeclLineNumber( self: *ZigObject, - mod: *Module, + pt: Zcu.PerThread, decl_index: InternPool.DeclIndex, ) !void { const tracy = trace(@src()); defer tracy.end(); - const decl = mod.declPtr(decl_index); - const decl_name = try decl.fullyQualifiedName(mod); + const decl = pt.zcu.declPtr(decl_index); + const decl_name = try decl.fullyQualifiedName(pt); - log.debug("updateDeclLineNumber {}{*}", .{ decl_name.fmt(&mod.intern_pool), decl }); + log.debug("updateDeclLineNumber {}{*}", .{ decl_name.fmt(&pt.zcu.intern_pool), decl }); if (self.dwarf) |*dw| { - try dw.updateDeclLineNumber(mod, decl_index); + try dw.updateDeclLineNumber(pt.zcu, decl_index); } } diff --git a/src/link/MachO.zig b/src/link/MachO.zig index d0c78bc2c2..ff083d367c 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -3198,9 +3198,9 @@ pub fn updateDecl(self: *MachO, pt: Zcu.PerThread, decl_index: InternPool.DeclIn return self.getZigObject().?.updateDecl(self, pt, decl_index); } -pub fn updateDeclLineNumber(self: *MachO, module: *Module, decl_index: InternPool.DeclIndex) !void { +pub fn updateDeclLineNumber(self: *MachO, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !void { if (self.llvm_object) |_| return; - return self.getZigObject().?.updateDeclLineNumber(module, decl_index); + return self.getZigObject().?.updateDeclLineNumber(pt, decl_index); } pub fn updateExports( @@ -3230,7 +3230,7 @@ pub fn freeDecl(self: *MachO, decl_index: InternPool.DeclIndex) void { return self.getZigObject().?.freeDecl(decl_index); } -pub fn getDeclVAddr(self: *MachO, decl_index: InternPool.DeclIndex, reloc_info: link.File.RelocInfo) !u64 { +pub fn getDeclVAddr(self: *MachO, _: Zcu.PerThread, decl_index: InternPool.DeclIndex, reloc_info: link.File.RelocInfo) !u64 { assert(self.llvm_object == null); return self.getZigObject().?.getDeclVAddr(self, decl_index, reloc_info); } diff --git a/src/link/MachO/ZigObject.zig b/src/link/MachO/ZigObject.zig index ffe362038d..03e659c497 100644 --- a/src/link/MachO/ZigObject.zig +++ b/src/link/MachO/ZigObject.zig @@ -810,7 +810,7 @@ fn updateDeclCode( const gpa = macho_file.base.comp.gpa; const mod = pt.zcu; const decl = mod.declPtr(decl_index); - const decl_name = try decl.fullyQualifiedName(mod); + const decl_name = try decl.fullyQualifiedName(pt); log.debug("updateDeclCode {}{*}", .{ decl_name.fmt(&mod.intern_pool), decl }); @@ -893,13 +893,12 @@ fn updateTlv( sect_index: u8, code: []const u8, ) !void { - const mod = pt.zcu; - const decl = mod.declPtr(decl_index); - const decl_name = try decl.fullyQualifiedName(mod); + const decl = pt.zcu.declPtr(decl_index); + const decl_name = try decl.fullyQualifiedName(pt); - log.debug("updateTlv {} ({*})", .{ decl_name.fmt(&mod.intern_pool), decl }); + log.debug("updateTlv {} ({*})", .{ decl_name.fmt(&pt.zcu.intern_pool), decl }); - const decl_name_slice = decl_name.toSlice(&mod.intern_pool); + const decl_name_slice = decl_name.toSlice(&pt.zcu.intern_pool); const required_alignment = decl.getAlignment(pt); // 1. Lower TLV initializer @@ -1100,7 +1099,7 @@ pub fn lowerUnnamedConst( } const unnamed_consts = gop.value_ptr; const decl = mod.declPtr(decl_index); - const decl_name = try decl.fullyQualifiedName(mod); + const decl_name = try decl.fullyQualifiedName(pt); const index = unnamed_consts.items.len; const name = try std.fmt.allocPrint(gpa, "__unnamed_{}_{d}", .{ decl_name.fmt(&mod.intern_pool), index }); defer gpa.free(name); @@ -1363,9 +1362,9 @@ fn updateLazySymbol( } /// Must be called only after a successful call to `updateDecl`. 
-pub fn updateDeclLineNumber(self: *ZigObject, mod: *Module, decl_index: InternPool.DeclIndex) !void { +pub fn updateDeclLineNumber(self: *ZigObject, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !void { if (self.dwarf) |*dw| { - try dw.updateDeclLineNumber(mod, decl_index); + try dw.updateDeclLineNumber(pt.zcu, decl_index); } } diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig index 827c974180..cfc8435906 100644 --- a/src/link/Plan9.zig +++ b/src/link/Plan9.zig @@ -483,7 +483,7 @@ pub fn lowerUnnamedConst(self: *Plan9, pt: Zcu.PerThread, val: Value, decl_index } const unnamed_consts = gop.value_ptr; - const decl_name = try decl.fullyQualifiedName(mod); + const decl_name = try decl.fullyQualifiedName(pt); const index = unnamed_consts.items.len; // name is freed when the unnamed const is freed @@ -1496,22 +1496,22 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void { } /// Must be called only after a successful call to `updateDecl`. -pub fn updateDeclLineNumber(self: *Plan9, mod: *Zcu, decl_index: InternPool.DeclIndex) !void { +pub fn updateDeclLineNumber(self: *Plan9, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !void { _ = self; - _ = mod; + _ = pt; _ = decl_index; } pub fn getDeclVAddr( self: *Plan9, + pt: Zcu.PerThread, decl_index: InternPool.DeclIndex, reloc_info: link.File.RelocInfo, ) !u64 { - const mod = self.base.comp.module.?; - const ip = &mod.intern_pool; - const decl = mod.declPtr(decl_index); + const ip = &pt.zcu.intern_pool; + const decl = pt.zcu.declPtr(decl_index); log.debug("getDeclVAddr for {}", .{decl.name.fmt(ip)}); - if (decl.isExtern(mod)) { + if (decl.isExtern(pt.zcu)) { if (decl.name.eqlSlice("etext", ip)) { try self.addReloc(reloc_info.parent_atom_index, .{ .target = undefined, diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index 4e661e33e4..32af004132 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -1457,9 +1457,9 @@ pub fn updateDecl(wasm: *Wasm, pt: Zcu.PerThread, decl_index: InternPool.DeclInd try wasm.zigObjectPtr().?.updateDecl(wasm, pt, decl_index); } -pub fn updateDeclLineNumber(wasm: *Wasm, mod: *Zcu, decl_index: InternPool.DeclIndex) !void { +pub fn updateDeclLineNumber(wasm: *Wasm, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !void { if (wasm.llvm_object) |_| return; - try wasm.zigObjectPtr().?.updateDeclLineNumber(mod, decl_index); + try wasm.zigObjectPtr().?.updateDeclLineNumber(pt, decl_index); } /// From a given symbol location, returns its `wasm.GlobalType`. @@ -1521,10 +1521,11 @@ pub fn getGlobalSymbol(wasm: *Wasm, name: []const u8, lib_name: ?[]const u8) !Sy /// Returns the given pointer address pub fn getDeclVAddr( wasm: *Wasm, + pt: Zcu.PerThread, decl_index: InternPool.DeclIndex, reloc_info: link.File.RelocInfo, ) !u64 { - return wasm.zigObjectPtr().?.getDeclVAddr(wasm, decl_index, reloc_info); + return wasm.zigObjectPtr().?.getDeclVAddr(wasm, pt, decl_index, reloc_info); } pub fn lowerAnonDecl( @@ -4016,8 +4017,8 @@ pub fn getErrorTableSymbol(wasm_file: *Wasm, pt: Zcu.PerThread) !u32 { /// For a given `InternPool.DeclIndex` returns its corresponding `Atom.Index`. /// When the index was not found, a new `Atom` will be created, and its index will be returned. /// The newly created Atom is empty with default fields as specified by `Atom.empty`. 
-pub fn getOrCreateAtomForDecl(wasm_file: *Wasm, decl_index: InternPool.DeclIndex) !Atom.Index { - return wasm_file.zigObjectPtr().?.getOrCreateAtomForDecl(wasm_file, decl_index); +pub fn getOrCreateAtomForDecl(wasm_file: *Wasm, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !Atom.Index { + return wasm_file.zigObjectPtr().?.getOrCreateAtomForDecl(wasm_file, pt, decl_index); } /// Verifies all resolved symbols and checks whether itself needs to be marked alive, diff --git a/src/link/Wasm/ZigObject.zig b/src/link/Wasm/ZigObject.zig index a693902743..f95c8fc794 100644 --- a/src/link/Wasm/ZigObject.zig +++ b/src/link/Wasm/ZigObject.zig @@ -253,7 +253,7 @@ pub fn updateDecl( } const gpa = wasm_file.base.comp.gpa; - const atom_index = try zig_object.getOrCreateAtomForDecl(wasm_file, decl_index); + const atom_index = try zig_object.getOrCreateAtomForDecl(wasm_file, pt, decl_index); const atom = wasm_file.getAtomPtr(atom_index); atom.clear(); @@ -302,7 +302,7 @@ pub fn updateFunc( const func = pt.zcu.funcInfo(func_index); const decl_index = func.owner_decl; const decl = pt.zcu.declPtr(decl_index); - const atom_index = try zig_object.getOrCreateAtomForDecl(wasm_file, decl_index); + const atom_index = try zig_object.getOrCreateAtomForDecl(wasm_file, pt, decl_index); const atom = wasm_file.getAtomPtr(atom_index); atom.clear(); @@ -346,7 +346,7 @@ fn finishUpdateDecl( const atom_index = decl_info.atom; const atom = wasm_file.getAtomPtr(atom_index); const sym = zig_object.symbol(atom.sym_index); - const full_name = try decl.fullyQualifiedName(zcu); + const full_name = try decl.fullyQualifiedName(pt); sym.name = try zig_object.string_table.insert(gpa, full_name.toSlice(ip)); try atom.code.appendSlice(gpa, code); atom.size = @intCast(code.len); @@ -424,17 +424,21 @@ fn createDataSegment( /// For a given `InternPool.DeclIndex` returns its corresponding `Atom.Index`. /// When the index was not found, a new `Atom` will be created, and its index will be returned. /// The newly created Atom is empty with default fields as specified by `Atom.empty`. 
-pub fn getOrCreateAtomForDecl(zig_object: *ZigObject, wasm_file: *Wasm, decl_index: InternPool.DeclIndex) !Atom.Index { - const gpa = wasm_file.base.comp.gpa; +pub fn getOrCreateAtomForDecl( + zig_object: *ZigObject, + wasm_file: *Wasm, + pt: Zcu.PerThread, + decl_index: InternPool.DeclIndex, +) !Atom.Index { + const gpa = pt.zcu.gpa; const gop = try zig_object.decls_map.getOrPut(gpa, decl_index); if (!gop.found_existing) { const sym_index = try zig_object.allocateSymbol(gpa); gop.value_ptr.* = .{ .atom = try wasm_file.createAtom(sym_index, zig_object.index) }; - const mod = wasm_file.base.comp.module.?; - const decl = mod.declPtr(decl_index); - const full_name = try decl.fullyQualifiedName(mod); + const decl = pt.zcu.declPtr(decl_index); + const full_name = try decl.fullyQualifiedName(pt); const sym = zig_object.symbol(sym_index); - sym.name = try zig_object.string_table.insert(gpa, full_name.toSlice(&mod.intern_pool)); + sym.name = try zig_object.string_table.insert(gpa, full_name.toSlice(&pt.zcu.intern_pool)); } return gop.value_ptr.atom; } @@ -487,10 +491,10 @@ pub fn lowerUnnamedConst( std.debug.assert(val.typeOf(mod).zigTypeTag(mod) != .Fn); // cannot create local symbols for functions const decl = mod.declPtr(decl_index); - const parent_atom_index = try zig_object.getOrCreateAtomForDecl(wasm_file, decl_index); + const parent_atom_index = try zig_object.getOrCreateAtomForDecl(wasm_file, pt, decl_index); const parent_atom = wasm_file.getAtom(parent_atom_index); const local_index = parent_atom.locals.items.len; - const fqn = try decl.fullyQualifiedName(mod); + const fqn = try decl.fullyQualifiedName(pt); const name = try std.fmt.allocPrintZ(gpa, "__unnamed_{}_{d}", .{ fqn.fmt(&mod.intern_pool), local_index, }); @@ -775,22 +779,22 @@ pub fn getGlobalSymbol(zig_object: *ZigObject, gpa: std.mem.Allocator, name: []c pub fn getDeclVAddr( zig_object: *ZigObject, wasm_file: *Wasm, + pt: Zcu.PerThread, decl_index: InternPool.DeclIndex, reloc_info: link.File.RelocInfo, ) !u64 { const target = wasm_file.base.comp.root_mod.resolved_target.result; - const gpa = wasm_file.base.comp.gpa; - const mod = wasm_file.base.comp.module.?; - const decl = mod.declPtr(decl_index); + const gpa = pt.zcu.gpa; + const decl = pt.zcu.declPtr(decl_index); - const target_atom_index = try zig_object.getOrCreateAtomForDecl(wasm_file, decl_index); + const target_atom_index = try zig_object.getOrCreateAtomForDecl(wasm_file, pt, decl_index); const target_symbol_index = @intFromEnum(wasm_file.getAtom(target_atom_index).sym_index); std.debug.assert(reloc_info.parent_atom_index != 0); const atom_index = wasm_file.symbol_atom.get(.{ .file = zig_object.index, .index = @enumFromInt(reloc_info.parent_atom_index) }).?; const atom = wasm_file.getAtomPtr(atom_index); const is_wasm32 = target.cpu.arch == .wasm32; - if (decl.typeOf(mod).zigTypeTag(mod) == .Fn) { + if (decl.typeOf(pt.zcu).zigTypeTag(pt.zcu) == .Fn) { std.debug.assert(reloc_info.addend == 0); // addend not allowed for function relocations try atom.relocs.append(gpa, .{ .index = target_symbol_index, @@ -890,7 +894,7 @@ pub fn updateExports( }, }; const decl = mod.declPtr(decl_index); - const atom_index = try zig_object.getOrCreateAtomForDecl(wasm_file, decl_index); + const atom_index = try zig_object.getOrCreateAtomForDecl(wasm_file, pt, decl_index); const decl_info = zig_object.decls_map.getPtr(decl_index).?; const atom = wasm_file.getAtom(atom_index); const atom_sym = atom.symbolLoc().getSymbol(wasm_file).*; @@ -1116,13 +1120,17 @@ pub fn 
createDebugSectionForIndex(zig_object: *ZigObject, wasm_file: *Wasm, inde return atom_index; } -pub fn updateDeclLineNumber(zig_object: *ZigObject, mod: *Zcu, decl_index: InternPool.DeclIndex) !void { +pub fn updateDeclLineNumber( + zig_object: *ZigObject, + pt: Zcu.PerThread, + decl_index: InternPool.DeclIndex, +) !void { if (zig_object.dwarf) |*dw| { - const decl = mod.declPtr(decl_index); - const decl_name = try decl.fullyQualifiedName(mod); + const decl = pt.zcu.declPtr(decl_index); + const decl_name = try decl.fullyQualifiedName(pt); - log.debug("updateDeclLineNumber {}{*}", .{ decl_name.fmt(&mod.intern_pool), decl }); - try dw.updateDeclLineNumber(mod, decl_index); + log.debug("updateDeclLineNumber {}{*}", .{ decl_name.fmt(&pt.zcu.intern_pool), decl }); + try dw.updateDeclLineNumber(pt.zcu, decl_index); } } diff --git a/src/mutable_value.zig b/src/mutable_value.zig index 0ca2d1d317..63f198dfa7 100644 --- a/src/mutable_value.zig +++ b/src/mutable_value.zig @@ -71,7 +71,7 @@ pub const MutableValue = union(enum) { } }), .bytes => |b| try pt.intern(.{ .aggregate = .{ .ty = b.ty, - .storage = .{ .bytes = try pt.zcu.intern_pool.getOrPutString(pt.zcu.gpa, b.data, .maybe_embedded_nulls) }, + .storage = .{ .bytes = try pt.zcu.intern_pool.getOrPutString(pt.zcu.gpa, pt.tid, b.data, .maybe_embedded_nulls) }, } }), .aggregate => |a| { const elems = try arena.alloc(InternPool.Index, a.elems.len); -- cgit v1.2.3
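
A note on the most frequent change in this patch: `InternPool` string interning now identifies the calling worker via the thread id. Assuming a `pt: Zcu.PerThread` is in scope, as it is at every call site touched above, the before/after shape is:

    // Before this commit:
    //     const s = try ip.getOrPutString(gpa, "builtin", .no_embedded_nulls);
    // After, the intern pool is told which thread is asking:
    const s = try ip.getOrPutString(gpa, pt.tid, "builtin", .no_embedded_nulls);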