From 525f341f33af9b8aad53931fd5511f00a82cb090 Mon Sep 17 00:00:00 2001
From: Jacob Young
Date: Sat, 15 Jun 2024 16:10:53 -0400
Subject: Zcu: introduce `PerThread` and pass to all the functions

---
 src/Compilation.zig | 60 +++++++++++++++++++++++++++--------------------------
 1 file changed, 31 insertions(+), 29 deletions(-)

(limited to 'src/Compilation.zig')

diff --git a/src/Compilation.zig b/src/Compilation.zig
index eda5f63a58..d3ff338080 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -2146,6 +2146,8 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
     try comp.performAllTheWork(main_progress_node);
 
     if (comp.module) |zcu| {
+        const pt: Zcu.PerThread = .{ .zcu = zcu, .tid = .main };
+
         if (build_options.enable_debug_extensions and comp.verbose_intern_pool) {
             std.debug.print("intern pool stats for '{s}':\n", .{
                 comp.root_name,
@@ -2165,10 +2167,10 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
             // The `test_functions` decl has been intentionally postponed until now,
             // at which point we must populate it with the list of test functions that
             // have been discovered and not filtered out.
-            try zcu.populateTestFunctions(main_progress_node);
+            try pt.populateTestFunctions(main_progress_node);
         }
 
-        try zcu.processExports();
+        try pt.processExports();
     }
 
     if (comp.totalErrorCount() != 0) {
@@ -2247,7 +2249,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
         }
     }
 
-    try flush(comp, arena, main_progress_node);
+    try flush(comp, arena, .main, main_progress_node);
     if (comp.totalErrorCount() != 0) return;
 
     // Failure here only means an unnecessary cache miss.
@@ -2264,16 +2266,16 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
             whole.lock = man.toOwnedLock();
         },
         .incremental => {
-            try flush(comp, arena, main_progress_node);
+            try flush(comp, arena, .main, main_progress_node);
             if (comp.totalErrorCount() != 0) return;
         },
     }
 }
 
-fn flush(comp: *Compilation, arena: Allocator, prog_node: std.Progress.Node) !void {
+fn flush(comp: *Compilation, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) !void {
     if (comp.bin_file) |lf| {
         // This is needed before reading the error flags.
-        lf.flush(arena, prog_node) catch |err| switch (err) {
+        lf.flush(arena, tid, prog_node) catch |err| switch (err) {
             error.FlushFailure => {}, // error reported through link_error_flags
             error.LLDReportedFailure => {}, // error reported via lockAndParseLldStderr
             else => |e| return e,
@@ -3419,7 +3421,7 @@ pub fn performAllTheWork(
 
     while (true) {
         if (comp.work_queue.readItem()) |work_item| {
-            try processOneJob(comp, work_item, main_progress_node);
+            try processOneJob(0, comp, work_item, main_progress_node);
             continue;
         }
         if (comp.module) |zcu| {
@@ -3447,11 +3449,11 @@
     }
 }
 
-fn processOneJob(comp: *Compilation, job: Job, prog_node: std.Progress.Node) !void {
+fn processOneJob(tid: usize, comp: *Compilation, job: Job, prog_node: std.Progress.Node) !void {
     switch (job) {
         .codegen_decl => |decl_index| {
-            const zcu = comp.module.?;
-            const decl = zcu.declPtr(decl_index);
+            const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = @enumFromInt(tid) };
+            const decl = pt.zcu.declPtr(decl_index);
 
             switch (decl.analysis) {
                 .unreferenced => unreachable,
@@ -3469,7 +3471,7 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: std.Progress.Node) !vo
 
                     assert(decl.has_tv);
 
-                    try zcu.linkerUpdateDecl(decl_index);
+                    try pt.linkerUpdateDecl(decl_index);
                     return;
                 },
             }
         },
         .codegen_func => |func| {
             const named_frame = tracy.namedFrame("codegen_func");
             defer named_frame.end();
 
-            const zcu = comp.module.?;
+            const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = @enumFromInt(tid) };
             // This call takes ownership of `func.air`.
-            try zcu.linkerUpdateFunc(func.func, func.air);
+            try pt.linkerUpdateFunc(func.func, func.air);
         },
         .analyze_func => |func| {
             const named_frame = tracy.namedFrame("analyze_func");
             defer named_frame.end();
 
-            const zcu = comp.module.?;
-            zcu.ensureFuncBodyAnalyzed(func) catch |err| switch (err) {
+            const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = @enumFromInt(tid) };
+            pt.ensureFuncBodyAnalyzed(func) catch |err| switch (err) {
                 error.OutOfMemory => return error.OutOfMemory,
                 error.AnalysisFail => return,
             };
@@ -3496,8 +3498,8 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: std.Progress.Node) !vo
             if (true) @panic("regressed compiler feature: emit-h should hook into updateExports, " ++
                 "not decl analysis, which is too early to know about @export calls");
 
-            const zcu = comp.module.?;
-            const decl = zcu.declPtr(decl_index);
+            const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = @enumFromInt(tid) };
+            const decl = pt.zcu.declPtr(decl_index);
 
             switch (decl.analysis) {
                 .unreferenced => unreachable,
@@ -3515,7 +3517,7 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: std.Progress.Node) !vo
                     defer named_frame.end();
 
                     const gpa = comp.gpa;
-                    const emit_h = zcu.emit_h.?;
+                    const emit_h = pt.zcu.emit_h.?;
                     _ = try emit_h.decl_table.getOrPut(gpa, decl_index);
                     const decl_emit_h = emit_h.declPtr(decl_index);
                     const fwd_decl = &decl_emit_h.fwd_decl;
 
                     var ctypes_arena = std.heap.ArenaAllocator.init(gpa);
                     defer ctypes_arena.deinit();
 
-                    const file_scope = zcu.namespacePtr(decl.src_namespace).fileScope(zcu);
+                    const file_scope = pt.zcu.namespacePtr(decl.src_namespace).fileScope(pt.zcu);
 
                     var dg: c_codegen.DeclGen = .{
                         .gpa = gpa,
-                        .zcu = zcu,
+                        .pt = pt,
                         .mod = file_scope.mod,
                         .error_msg = null,
                         .pass = .{ .decl = decl_index },
@@ -3557,25 +3559,25 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: std.Progress.Node) !vo
             }
         },
         .analyze_decl => |decl_index| {
-            const zcu = comp.module.?;
-            zcu.ensureDeclAnalyzed(decl_index) catch |err| switch (err) {
+            const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = @enumFromInt(tid) };
+            pt.ensureDeclAnalyzed(decl_index) catch |err| switch (err) {
                 error.OutOfMemory => return error.OutOfMemory,
                 error.AnalysisFail => return,
             };
-            const decl = zcu.declPtr(decl_index);
+            const decl = pt.zcu.declPtr(decl_index);
             if (decl.kind == .@"test" and comp.config.is_test) {
                 // Tests are always emitted in test binaries. The decl_refs are created by
                 // Zcu.populateTestFunctions, but this will not queue body analysis, so do
                 // that now.
-                try zcu.ensureFuncBodyAnalysisQueued(decl.val.toIntern());
+                try pt.zcu.ensureFuncBodyAnalysisQueued(decl.val.toIntern());
             }
         },
         .resolve_type_fully => |ty| {
             const named_frame = tracy.namedFrame("resolve_type_fully");
             defer named_frame.end();
 
-            const zcu = comp.module.?;
-            Type.fromInterned(ty).resolveFully(zcu) catch |err| switch (err) {
+            const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = @enumFromInt(tid) };
+            Type.fromInterned(ty).resolveFully(pt) catch |err| switch (err) {
                 error.OutOfMemory => return error.OutOfMemory,
                 error.AnalysisFail => return,
             };
@@ -3603,12 +3605,12 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: std.Progress.Node) !vo
                 try zcu.retryable_failures.append(gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index }));
             };
         },
-        .analyze_mod => |pkg| {
+        .analyze_mod => |mod| {
             const named_frame = tracy.namedFrame("analyze_mod");
             defer named_frame.end();
 
-            const zcu = comp.module.?;
-            zcu.semaPkg(pkg) catch |err| switch (err) {
+            const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = @enumFromInt(tid) };
+            pt.semaPkg(mod) catch |err| switch (err) {
                 error.OutOfMemory => return error.OutOfMemory,
                 error.AnalysisFail => return,
             };
--
cgit v1.2.3

From ca02266157ee72e41068672c8ca6f928fcbf6fdf Mon Sep 17 00:00:00 2001
From: Jacob Young
Date: Sat, 15 Jun 2024 19:57:47 -0400
Subject: Zcu: pass `PerThread` to intern pool string functions

---
 src/Compilation.zig          |  89 +++---
 src/InternPool.zig           |  21 +-
 src/Sema.zig                 | 262 ++++++++--------
 src/Value.zig                |   4 +-
 src/Zcu.zig                  | 687 +----------------------------------------
 src/Zcu/PerThread.zig        | 725 +++++++++++++++++++++++++++++++++++++++++--
 src/arch/wasm/CodeGen.zig    |  10 +-
 src/codegen.zig              |   2 +-
 src/codegen/llvm.zig         |  30 +-
 src/codegen/spirv.zig        |   8 +-
 src/link.zig                 |  10 +-
 src/link/C.zig               |   4 +-
 src/link/Coff.zig            |  10 +-
 src/link/Dwarf.zig           |   2 +-
 src/link/Elf.zig             |   6 +-
 src/link/Elf/ZigObject.zig   |  16 +-
 src/link/MachO.zig           |   6 +-
 src/link/MachO/ZigObject.zig |  17 +-
 src/link/Plan9.zig           |  14 +-
 src/link/Wasm.zig            |  11 +-
 src/link/Wasm/ZigObject.zig  |  52 ++--
 src/mutable_value.zig        |   2 +-
 22 files changed, 1025 insertions(+), 963 deletions(-)

(limited to 'src/Compilation.zig')

diff --git a/src/Compilation.zig b/src/Compilation.zig
index d3ff338080..1f4c425bc5 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -29,8 +29,6 @@ const wasi_libc = @import("wasi_libc.zig");
 const fatal = @import("main.zig").fatal;
 const clangMain = @import("main.zig").clangMain;
 const Zcu = @import("Zcu.zig");
-/// Deprecated; use `Zcu`.
-const Module = Zcu;
 const Sema = @import("Sema.zig");
 const InternPool = @import("InternPool.zig");
 const Cache = std.Build.Cache;
@@ -50,7 +48,7 @@ gpa: Allocator,
 arena: Allocator,
 /// Not every Compilation compiles .zig code! For example you could do `zig build-exe foo.o`.
 /// TODO: rename to zcu: ?*Zcu
-module: ?*Module,
+module: ?*Zcu,
 /// Contains different state depending on whether the Compilation uses
 /// incremental or whole cache mode.
 cache_use: CacheUse,
@@ -120,7 +118,7 @@ astgen_work_queue: std.fifo.LinearFifo(Zcu.File.Index, .Dynamic),
 /// These jobs are to inspect the file system stat() and if the embedded file has changed
 /// on disk, mark the corresponding Decl outdated and queue up an `analyze_decl`
 /// task for it.
-embed_file_work_queue: std.fifo.LinearFifo(*Module.EmbedFile, .Dynamic),
+embed_file_work_queue: std.fifo.LinearFifo(*Zcu.EmbedFile, .Dynamic),
 
 /// The ErrorMsg memory is owned by the `CObject`, using Compilation's general purpose allocator.
 /// This data is accessed by multiple threads and is protected by `mutex`.
@@ -252,7 +250,7 @@ pub const Emit = struct {
 };
 
 pub const default_stack_protector_buffer_size = target_util.default_stack_protector_buffer_size;
-pub const SemaError = Module.SemaError;
+pub const SemaError = Zcu.SemaError;
 
 pub const CRTFile = struct {
     lock: Cache.Lock,
@@ -1138,7 +1136,7 @@ pub const CreateOptions = struct {
     pdb_source_path: ?[]const u8 = null,
     /// (Windows) PDB output path
     pdb_out_path: ?[]const u8 = null,
-    error_limit: ?Compilation.Module.ErrorInt = null,
+    error_limit: ?Zcu.ErrorInt = null,
     global_cc_argv: []const []const u8 = &.{},
 
     pub const Entry = link.File.OpenOptions.Entry;
@@ -1344,7 +1342,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
     const main_mod = options.main_mod orelse options.root_mod;
     const comp = try arena.create(Compilation);
 
-    const opt_zcu: ?*Module = if (have_zcu) blk: {
+    const opt_zcu: ?*Zcu = if (have_zcu) blk: {
         // Pre-open the directory handles for cached ZIR code so that it does not need
         // to redundantly happen for each AstGen operation.
const zir_sub_dir = "z"; @@ -1362,8 +1360,8 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil .path = try options.global_cache_directory.join(arena, &[_][]const u8{zir_sub_dir}), }; - const emit_h: ?*Module.GlobalEmitH = if (options.emit_h) |loc| eh: { - const eh = try arena.create(Module.GlobalEmitH); + const emit_h: ?*Zcu.GlobalEmitH = if (options.emit_h) |loc| eh: { + const eh = try arena.create(Zcu.GlobalEmitH); eh.* = .{ .loc = loc }; break :eh eh; } else null; @@ -1386,7 +1384,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil .builtin_modules = null, // `builtin_mod` is set }); - const zcu = try arena.create(Module); + const zcu = try arena.create(Zcu); zcu.* = .{ .gpa = gpa, .comp = comp, @@ -1434,7 +1432,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil .c_object_work_queue = std.fifo.LinearFifo(*CObject, .Dynamic).init(gpa), .win32_resource_work_queue = if (build_options.only_core_functionality) {} else std.fifo.LinearFifo(*Win32Resource, .Dynamic).init(gpa), .astgen_work_queue = std.fifo.LinearFifo(Zcu.File.Index, .Dynamic).init(gpa), - .embed_file_work_queue = std.fifo.LinearFifo(*Module.EmbedFile, .Dynamic).init(gpa), + .embed_file_work_queue = std.fifo.LinearFifo(*Zcu.EmbedFile, .Dynamic).init(gpa), .c_source_files = options.c_source_files, .rc_source_files = options.rc_source_files, .cache_parent = cache, @@ -2626,7 +2624,7 @@ fn reportMultiModuleErrors(zcu: *Zcu) !void { var num_errors: u32 = 0; const max_errors = 5; // Attach the "some omitted" note to the final error message - var last_err: ?*Module.ErrorMsg = null; + var last_err: ?*Zcu.ErrorMsg = null; for (zcu.import_table.values(), 0..) |file, file_index_usize| { if (!file.multi_pkg) continue; @@ -2642,13 +2640,13 @@ fn reportMultiModuleErrors(zcu: *Zcu) !void { const omitted = file.references.items.len -| max_notes; const num_notes = file.references.items.len - omitted; - const notes = try gpa.alloc(Module.ErrorMsg, if (omitted > 0) num_notes + 1 else num_notes); + const notes = try gpa.alloc(Zcu.ErrorMsg, if (omitted > 0) num_notes + 1 else num_notes); errdefer gpa.free(notes); for (notes[0..num_notes], file.references.items[0..num_notes], 0..) 
|*note, ref, i| { errdefer for (notes[0..i]) |*n| n.deinit(gpa); note.* = switch (ref) { - .import => |import| try Module.ErrorMsg.init( + .import => |import| try Zcu.ErrorMsg.init( gpa, .{ .base_node_inst = try ip.trackZir(gpa, import.file, .main_struct_inst), @@ -2657,7 +2655,7 @@ fn reportMultiModuleErrors(zcu: *Zcu) !void { "imported from module {s}", .{zcu.fileByIndex(import.file).mod.fully_qualified_name}, ), - .root => |pkg| try Module.ErrorMsg.init( + .root => |pkg| try Zcu.ErrorMsg.init( gpa, .{ .base_node_inst = try ip.trackZir(gpa, file_index, .main_struct_inst), @@ -2671,7 +2669,7 @@ fn reportMultiModuleErrors(zcu: *Zcu) !void { errdefer for (notes[0..num_notes]) |*n| n.deinit(gpa); if (omitted > 0) { - notes[num_notes] = try Module.ErrorMsg.init( + notes[num_notes] = try Zcu.ErrorMsg.init( gpa, .{ .base_node_inst = try ip.trackZir(gpa, file_index, .main_struct_inst), @@ -2683,7 +2681,7 @@ fn reportMultiModuleErrors(zcu: *Zcu) !void { } errdefer if (omitted > 0) notes[num_notes].deinit(gpa); - const err = try Module.ErrorMsg.create( + const err = try Zcu.ErrorMsg.create( gpa, .{ .base_node_inst = try ip.trackZir(gpa, file_index, .main_struct_inst), @@ -2706,7 +2704,7 @@ fn reportMultiModuleErrors(zcu: *Zcu) !void { // There isn't really any meaningful place to put this note, so just attach it to the // last failed file - var note = try Module.ErrorMsg.init( + var note = try Zcu.ErrorMsg.init( gpa, err.src_loc, "{} more errors omitted", @@ -3095,10 +3093,10 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { const values = zcu.compile_log_sources.values(); // First one will be the error; subsequent ones will be notes. const src_loc = values[0].src(); - const err_msg: Module.ErrorMsg = .{ + const err_msg: Zcu.ErrorMsg = .{ .src_loc = src_loc, .msg = "found compile log statement", - .notes = try gpa.alloc(Module.ErrorMsg, zcu.compile_log_sources.count() - 1), + .notes = try gpa.alloc(Zcu.ErrorMsg, zcu.compile_log_sources.count() - 1), }; defer gpa.free(err_msg.notes); @@ -3166,9 +3164,9 @@ pub const ErrorNoteHashContext = struct { }; pub fn addModuleErrorMsg( - mod: *Module, + mod: *Zcu, eb: *ErrorBundle.Wip, - module_err_msg: Module.ErrorMsg, + module_err_msg: Zcu.ErrorMsg, all_references: *const std.AutoHashMapUnmanaged(InternPool.AnalUnit, Zcu.ResolvedReference), ) !void { const gpa = eb.gpa; @@ -3299,7 +3297,7 @@ pub fn addModuleErrorMsg( } } -pub fn addZirErrorMessages(eb: *ErrorBundle.Wip, file: *Module.File) !void { +pub fn addZirErrorMessages(eb: *ErrorBundle.Wip, file: *Zcu.File) !void { assert(file.zir_loaded); assert(file.tree_loaded); assert(file.source_loaded); @@ -3378,7 +3376,7 @@ pub fn performAllTheWork( const path_digest = zcu.filePathDigest(file_index); const root_decl = zcu.fileRootDecl(file_index); const file = zcu.fileByIndex(file_index); - comp.thread_pool.spawnWg(&comp.astgen_wait_group, workerAstGenFile, .{ + comp.thread_pool.spawnWgId(&comp.astgen_wait_group, workerAstGenFile, .{ comp, file, file_index, path_digest, root_decl, zir_prog_node, &comp.astgen_wait_group, .root, }); } @@ -3587,22 +3585,22 @@ fn processOneJob(tid: usize, comp: *Compilation, job: Job, prog_node: std.Progre defer named_frame.end(); const gpa = comp.gpa; - const zcu = comp.module.?; - const decl = zcu.declPtr(decl_index); + const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = @enumFromInt(tid) }; + const decl = pt.zcu.declPtr(decl_index); const lf = comp.bin_file.?; - lf.updateDeclLineNumber(zcu, decl_index) catch |err| { - try 
zcu.failed_analysis.ensureUnusedCapacity(gpa, 1); - zcu.failed_analysis.putAssumeCapacityNoClobber( + lf.updateDeclLineNumber(pt, decl_index) catch |err| { + try pt.zcu.failed_analysis.ensureUnusedCapacity(gpa, 1); + pt.zcu.failed_analysis.putAssumeCapacityNoClobber( InternPool.AnalUnit.wrap(.{ .decl = decl_index }), try Zcu.ErrorMsg.create( gpa, - decl.navSrcLoc(zcu), + decl.navSrcLoc(pt.zcu), "unable to update line number: {s}", .{@errorName(err)}, ), ); decl.analysis = .codegen_failure; - try zcu.retryable_failures.append(gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index })); + try pt.zcu.retryable_failures.append(gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index })); }; }, .analyze_mod => |mod| { @@ -4049,6 +4047,7 @@ const AstGenSrc = union(enum) { }; fn workerAstGenFile( + tid: usize, comp: *Compilation, file: *Zcu.File, file_index: Zcu.File.Index, @@ -4061,8 +4060,8 @@ fn workerAstGenFile( const child_prog_node = prog_node.start(file.sub_file_path, 0); defer child_prog_node.end(); - const zcu = comp.module.?; - zcu.astGenFile(file, file_index, path_digest, root_decl) catch |err| switch (err) { + const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = @enumFromInt(tid) }; + pt.astGenFile(file, file_index, path_digest, root_decl) catch |err| switch (err) { error.AnalysisFail => return, else => { file.status = .retryable_failure; @@ -4097,15 +4096,15 @@ fn workerAstGenFile( comp.mutex.lock(); defer comp.mutex.unlock(); - const res = zcu.importFile(file, import_path) catch continue; + const res = pt.zcu.importFile(file, import_path) catch continue; if (!res.is_pkg) { - res.file.addReference(zcu.*, .{ .import = .{ + res.file.addReference(pt.zcu.*, .{ .import = .{ .file = file_index, .token = item.data.token, } }) catch continue; } - const imported_path_digest = zcu.filePathDigest(res.file_index); - const imported_root_decl = zcu.fileRootDecl(res.file_index); + const imported_path_digest = pt.zcu.filePathDigest(res.file_index); + const imported_root_decl = pt.zcu.fileRootDecl(res.file_index); break :blk .{ res, imported_path_digest, imported_root_decl }; }; if (import_result.is_new) { @@ -4116,7 +4115,7 @@ fn workerAstGenFile( .importing_file = file_index, .import_tok = item.data.token, } }; - comp.thread_pool.spawnWg(wg, workerAstGenFile, .{ + comp.thread_pool.spawnWgId(wg, workerAstGenFile, .{ comp, import_result.file, import_result.file_index, imported_path_digest, imported_root_decl, prog_node, wg, sub_src, }); } @@ -4127,7 +4126,7 @@ fn workerAstGenFile( fn workerUpdateBuiltinZigFile( comp: *Compilation, mod: *Package.Module, - file: *Module.File, + file: *Zcu.File, ) void { Builtin.populateFile(comp, mod, file) catch |err| { comp.mutex.lock(); @@ -4139,7 +4138,7 @@ fn workerUpdateBuiltinZigFile( }; } -fn workerCheckEmbedFile(comp: *Compilation, embed_file: *Module.EmbedFile) void { +fn workerCheckEmbedFile(comp: *Compilation, embed_file: *Zcu.EmbedFile) void { comp.detectEmbedFileUpdate(embed_file) catch |err| { comp.reportRetryableEmbedFileError(embed_file, err) catch |oom| switch (oom) { // Swallowing this error is OK because it's implied to be OOM when @@ -4150,7 +4149,7 @@ fn workerCheckEmbedFile(comp: *Compilation, embed_file: *Module.EmbedFile) void }; } -fn detectEmbedFileUpdate(comp: *Compilation, embed_file: *Module.EmbedFile) !void { +fn detectEmbedFileUpdate(comp: *Compilation, embed_file: *Zcu.EmbedFile) !void { const mod = comp.module.?; const ip = &mod.intern_pool; var file = try embed_file.owner.root.openFile(embed_file.sub_file_path.toSlice(ip), .{}); @@ 
-4477,7 +4476,7 @@ fn reportRetryableAstGenError( const file = zcu.fileByIndex(file_index); file.status = .retryable_failure; - const src_loc: Module.LazySrcLoc = switch (src) { + const src_loc: Zcu.LazySrcLoc = switch (src) { .root => .{ .base_node_inst = try zcu.intern_pool.trackZir(gpa, file_index, .main_struct_inst), .offset = .entire_file, @@ -4488,7 +4487,7 @@ fn reportRetryableAstGenError( }, }; - const err_msg = try Module.ErrorMsg.create(gpa, src_loc, "unable to load '{}{s}': {s}", .{ + const err_msg = try Zcu.ErrorMsg.create(gpa, src_loc, "unable to load '{}{s}': {s}", .{ file.mod.root, file.sub_file_path, @errorName(err), }); errdefer err_msg.destroy(gpa); @@ -4502,14 +4501,14 @@ fn reportRetryableAstGenError( fn reportRetryableEmbedFileError( comp: *Compilation, - embed_file: *Module.EmbedFile, + embed_file: *Zcu.EmbedFile, err: anyerror, ) error{OutOfMemory}!void { const mod = comp.module.?; const gpa = mod.gpa; const src_loc = embed_file.src_loc; const ip = &mod.intern_pool; - const err_msg = try Module.ErrorMsg.create(gpa, src_loc, "unable to load '{}{s}': {s}", .{ + const err_msg = try Zcu.ErrorMsg.create(gpa, src_loc, "unable to load '{}{s}': {s}", .{ embed_file.owner.root, embed_file.sub_file_path.toSlice(ip), @errorName(err), diff --git a/src/InternPool.zig b/src/InternPool.zig index 1338743182..97fd35bf20 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -4539,7 +4539,7 @@ pub fn init(ip: *InternPool, gpa: Allocator) !void { assert(ip.items.len == 0); // Reserve string index 0 for an empty string. - assert((try ip.getOrPutString(gpa, "", .no_embedded_nulls)) == .empty); + assert((try ip.getOrPutString(gpa, .main, "", .no_embedded_nulls)) == .empty); // So that we can use `catch unreachable` below. try ip.items.ensureUnusedCapacity(gpa, static_keys.len); @@ -5986,6 +5986,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All ); const string = try ip.getOrPutTrailingString( gpa, + tid, @intCast(len_including_sentinel), .maybe_embedded_nulls, ); @@ -6865,6 +6866,7 @@ pub fn getFuncInstance( return finishFuncInstance( ip, gpa, + tid, generic_owner, func_index, func_extra_index, @@ -6879,7 +6881,7 @@ pub fn getFuncInstance( pub fn getFuncInstanceIes( ip: *InternPool, gpa: Allocator, - _: Zcu.PerThread.Id, + tid: Zcu.PerThread.Id, arg: GetFuncInstanceKey, ) Allocator.Error!Index { // Validate input parameters. 
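
A note on the shape of these InternPool changes: every entry point that can append to `string_bytes` (`getOrPutString`, `getOrPutStringFmt`, `getOrPutStringOpt`, and `getFuncInstance`/`getFuncInstanceIes` via `finishFuncInstance`) now accepts the caller's `Zcu.PerThread.Id` and forwards it to `getOrPutTrailingString`, which for the moment discards it (`_ = tid;` in the next hunk). Threading the unused id through now presumably means a later move to per-thread string buffers will not have to touch every call site again. A minimal sketch of that calling convention, assuming stand-in types (`Pool` and `Id` are illustrations, not the real `InternPool` and `Zcu.PerThread.Id`, and deduplication is omitted):

    const std = @import("std");
    const Allocator = std.mem.Allocator;

    const Id = enum(u8) { main, _ }; // stand-in for Zcu.PerThread.Id

    const Pool = struct {
        string_bytes: std.ArrayListUnmanaged(u8) = .{},

        // Same shape as the patched getOrPutString: append the bytes plus a
        // NUL terminator, then hand off to the trailing-string path.
        fn getOrPutString(pool: *Pool, gpa: Allocator, tid: Id, slice: []const u8) Allocator.Error!u32 {
            try pool.string_bytes.ensureUnusedCapacity(gpa, slice.len + 1);
            pool.string_bytes.appendSliceAssumeCapacity(slice);
            pool.string_bytes.appendAssumeCapacity(0);
            return pool.getOrPutTrailingString(gpa, tid, slice.len + 1);
        }

        fn getOrPutTrailingString(pool: *Pool, gpa: Allocator, tid: Id, len: usize) Allocator.Error!u32 {
            _ = gpa;
            _ = tid; // accepted everywhere now, unused until strings move to per-thread storage
            return @intCast(pool.string_bytes.items.len - len);
        }
    };
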
@@ -6994,6 +6996,7 @@ pub fn getFuncInstanceIes( return finishFuncInstance( ip, gpa, + tid, generic_owner, func_index, func_extra_index, @@ -7005,6 +7008,7 @@ pub fn getFuncInstanceIes( fn finishFuncInstance( ip: *InternPool, gpa: Allocator, + tid: Zcu.PerThread.Id, generic_owner: Index, func_index: Index, func_extra_index: u32, @@ -7036,7 +7040,7 @@ fn finishFuncInstance( // TODO: improve this name const decl = ip.declPtr(decl_index); - decl.name = try ip.getOrPutStringFmt(gpa, "{}__anon_{d}", .{ + decl.name = try ip.getOrPutStringFmt(gpa, tid, "{}__anon_{d}", .{ fn_owner_decl.name.fmt(ip), @intFromEnum(decl_index), }, .no_embedded_nulls); @@ -8782,18 +8786,20 @@ const EmbeddedNulls = enum { pub fn getOrPutString( ip: *InternPool, gpa: Allocator, + tid: Zcu.PerThread.Id, slice: []const u8, comptime embedded_nulls: EmbeddedNulls, ) Allocator.Error!embedded_nulls.StringType() { try ip.string_bytes.ensureUnusedCapacity(gpa, slice.len + 1); ip.string_bytes.appendSliceAssumeCapacity(slice); ip.string_bytes.appendAssumeCapacity(0); - return ip.getOrPutTrailingString(gpa, slice.len + 1, embedded_nulls); + return ip.getOrPutTrailingString(gpa, tid, slice.len + 1, embedded_nulls); } pub fn getOrPutStringFmt( ip: *InternPool, gpa: Allocator, + tid: Zcu.PerThread.Id, comptime format: []const u8, args: anytype, comptime embedded_nulls: EmbeddedNulls, @@ -8803,16 +8809,17 @@ pub fn getOrPutStringFmt( try ip.string_bytes.ensureUnusedCapacity(gpa, len); ip.string_bytes.writer(undefined).print(format, args) catch unreachable; ip.string_bytes.appendAssumeCapacity(0); - return ip.getOrPutTrailingString(gpa, len, embedded_nulls); + return ip.getOrPutTrailingString(gpa, tid, len, embedded_nulls); } pub fn getOrPutStringOpt( ip: *InternPool, gpa: Allocator, + tid: Zcu.PerThread.Id, slice: ?[]const u8, comptime embedded_nulls: EmbeddedNulls, ) Allocator.Error!embedded_nulls.OptionalStringType() { - const string = try getOrPutString(ip, gpa, slice orelse return .none, embedded_nulls); + const string = try getOrPutString(ip, gpa, tid, slice orelse return .none, embedded_nulls); return string.toOptional(); } @@ -8820,9 +8827,11 @@ pub fn getOrPutStringOpt( pub fn getOrPutTrailingString( ip: *InternPool, gpa: Allocator, + tid: Zcu.PerThread.Id, len: usize, comptime embedded_nulls: EmbeddedNulls, ) Allocator.Error!embedded_nulls.StringType() { + _ = tid; const string_bytes = &ip.string_bytes; const str_index: u32 = @intCast(string_bytes.items.len - len); if (len > 0 and string_bytes.getLast() == 0) { diff --git a/src/Sema.zig b/src/Sema.zig index dd8d2712ed..ee4ac3b703 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -2093,12 +2093,12 @@ pub fn setupErrorReturnTrace(sema: *Sema, block: *Block, last_arg_index: usize) const st_ptr = try err_trace_block.addTy(.alloc, try pt.singleMutPtrType(stack_trace_ty)); // st.instruction_addresses = &addrs; - const instruction_addresses_field_name = try ip.getOrPutString(gpa, "instruction_addresses", .no_embedded_nulls); + const instruction_addresses_field_name = try ip.getOrPutString(gpa, pt.tid, "instruction_addresses", .no_embedded_nulls); const addr_field_ptr = try sema.fieldPtr(&err_trace_block, src, st_ptr, instruction_addresses_field_name, src, true); try sema.storePtr2(&err_trace_block, src, addr_field_ptr, src, addrs_ptr, src, .store); // st.index = 0; - const index_field_name = try ip.getOrPutString(gpa, "index", .no_embedded_nulls); + const index_field_name = try ip.getOrPutString(gpa, pt.tid, "index", .no_embedded_nulls); const index_field_ptr = try 
sema.fieldPtr(&err_trace_block, src, st_ptr, index_field_name, src, true); try sema.storePtr2(&err_trace_block, src, index_field_ptr, src, .zero_usize, src, .store); @@ -2691,6 +2691,7 @@ fn getCaptures(sema: *Sema, block: *Block, type_src: LazySrcLoc, extra_index: us .decl_val => |str| capture: { const decl_name = try ip.getOrPutString( sema.gpa, + pt.tid, sema.code.nullTerminatedString(str), .no_embedded_nulls, ); @@ -2700,6 +2701,7 @@ fn getCaptures(sema: *Sema, block: *Block, type_src: LazySrcLoc, extra_index: us .decl_ref => |str| capture: { const decl_name = try ip.getOrPutString( sema.gpa, + pt.tid, sema.code.nullTerminatedString(str), .no_embedded_nulls, ); @@ -2847,7 +2849,7 @@ fn zirStructDecl( if (new_namespace_index.unwrap()) |ns| { const decls = sema.code.bodySlice(extra_index, decls_len); - try mod.scanNamespace(ns, decls, mod.declPtr(new_decl_index)); + try pt.scanNamespace(ns, decls, mod.declPtr(new_decl_index)); } try pt.finalizeAnonDecl(new_decl_index); @@ -2919,7 +2921,7 @@ fn createAnonymousDeclTypeNamed( }; try writer.writeByte(')'); - const name = try ip.getOrPutString(gpa, buf.items, .no_embedded_nulls); + const name = try ip.getOrPutString(gpa, pt.tid, buf.items, .no_embedded_nulls); try zcu.initNewAnonDecl(new_decl_index, val, name); return new_decl_index; }, @@ -2931,7 +2933,7 @@ fn createAnonymousDeclTypeNamed( .dbg_var_ptr, .dbg_var_val => { if (zir_data[i].str_op.operand != ref) continue; - const name = try ip.getOrPutStringFmt(gpa, "{}.{s}", .{ + const name = try ip.getOrPutStringFmt(gpa, pt.tid, "{}.{s}", .{ block.type_name_ctx.fmt(ip), zir_data[i].str_op.getStr(sema.code), }, .no_embedded_nulls); try zcu.initNewAnonDecl(new_decl_index, val, name); @@ -2952,7 +2954,7 @@ fn createAnonymousDeclTypeNamed( // This name is also used as the key in the parent namespace so it cannot be // renamed. - const name = ip.getOrPutStringFmt(gpa, "{}__{s}_{d}", .{ + const name = ip.getOrPutStringFmt(gpa, pt.tid, "{}__{s}_{d}", .{ block.type_name_ctx.fmt(ip), anon_prefix, @intFromEnum(new_decl_index), }, .no_embedded_nulls) catch unreachable; try zcu.initNewAnonDecl(new_decl_index, val, name); @@ -3084,7 +3086,7 @@ fn zirEnumDecl( errdefer if (!done) if (new_namespace_index.unwrap()) |ns| mod.destroyNamespace(ns); if (new_namespace_index.unwrap()) |ns| { - try mod.scanNamespace(ns, decls, new_decl); + try pt.scanNamespace(ns, decls, new_decl); } // We've finished the initial construction of this type, and are about to perform analysis. 
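
The Sema hunks below all apply one mechanical rewrite: where a function used to unwrap `sema.pt` into a bare `zcu`/`mod` local, it now keeps the whole `PerThread` handle so that `pt.tid` can travel alongside `gpa` into the intern pool. Schematically (a distillation of the pattern, not a verbatim excerpt):

    // before: only the Zcu was needed
    const zcu = sema.pt.zcu;
    const name = try zcu.intern_pool.getOrPutString(gpa, bytes, .no_embedded_nulls);

    // after: keep the PerThread handle so its tid reaches the intern pool
    const pt = sema.pt;
    const name = try pt.zcu.intern_pool.getOrPutString(gpa, pt.tid, bytes, .no_embedded_nulls);
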
@@ -3169,7 +3171,7 @@ fn zirEnumDecl( const field_name_zir = sema.code.nullTerminatedString(field_name_index); extra_index += 2; // field name, doc comment - const field_name = try mod.intern_pool.getOrPutString(gpa, field_name_zir, .no_embedded_nulls); + const field_name = try mod.intern_pool.getOrPutString(gpa, pt.tid, field_name_zir, .no_embedded_nulls); const value_src: LazySrcLoc = .{ .base_node_inst = tracked_inst, @@ -3352,7 +3354,7 @@ fn zirUnionDecl( if (new_namespace_index.unwrap()) |ns| { const decls = sema.code.bodySlice(extra_index, decls_len); - try mod.scanNamespace(ns, decls, mod.declPtr(new_decl_index)); + try pt.scanNamespace(ns, decls, mod.declPtr(new_decl_index)); } try pt.finalizeAnonDecl(new_decl_index); @@ -3441,7 +3443,7 @@ fn zirOpaqueDecl( if (new_namespace_index.unwrap()) |ns| { const decls = sema.code.bodySlice(extra_index, decls_len); - try mod.scanNamespace(ns, decls, mod.declPtr(new_decl_index)); + try pt.scanNamespace(ns, decls, mod.declPtr(new_decl_index)); } try pt.finalizeAnonDecl(new_decl_index); @@ -3470,7 +3472,7 @@ fn zirErrorSetDecl( while (extra_index < extra_index_end) : (extra_index += 2) { // +2 to skip over doc_string const name_index: Zir.NullTerminatedString = @enumFromInt(sema.code.extra[extra_index]); const name = sema.code.nullTerminatedString(name_index); - const name_ip = try mod.intern_pool.getOrPutString(gpa, name, .no_embedded_nulls); + const name_ip = try mod.intern_pool.getOrPutString(gpa, pt.tid, name, .no_embedded_nulls); _ = try mod.getErrorValue(name_ip); const result = names.getOrPutAssumeCapacity(name_ip); assert(!result.found_existing); // verified in AstGen @@ -3634,7 +3636,7 @@ fn indexablePtrLen( const is_pointer_to = object_ty.isSinglePointer(mod); const indexable_ty = if (is_pointer_to) object_ty.childType(mod) else object_ty; try checkIndexable(sema, block, src, indexable_ty); - const field_name = try mod.intern_pool.getOrPutString(sema.gpa, "len", .no_embedded_nulls); + const field_name = try mod.intern_pool.getOrPutString(sema.gpa, pt.tid, "len", .no_embedded_nulls); return sema.fieldVal(block, src, object, field_name, src); } @@ -3649,7 +3651,7 @@ fn indexablePtrLenOrNone( const operand_ty = sema.typeOf(operand); try checkMemOperand(sema, block, src, operand_ty); if (operand_ty.ptrSize(mod) == .Many) return .none; - const field_name = try mod.intern_pool.getOrPutString(sema.gpa, "len", .no_embedded_nulls); + const field_name = try mod.intern_pool.getOrPutString(sema.gpa, pt.tid, "len", .no_embedded_nulls); return sema.fieldVal(block, src, operand, field_name, src); } @@ -4405,7 +4407,7 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. 
} if (!object_ty.indexableHasLen(mod)) continue; - break :l try sema.fieldVal(block, arg_src, object, try ip.getOrPutString(gpa, "len", .no_embedded_nulls), arg_src); + break :l try sema.fieldVal(block, arg_src, object, try ip.getOrPutString(gpa, pt.tid, "len", .no_embedded_nulls), arg_src); }; const arg_len = try sema.coerce(block, Type.usize, arg_len_uncoerced, arg_src); if (len == .none) { @@ -4797,6 +4799,7 @@ fn validateUnionInit( const field_ptr_extra = sema.code.extraData(Zir.Inst.Field, field_ptr_data.payload_index).data; const field_name = try mod.intern_pool.getOrPutString( gpa, + pt.tid, sema.code.nullTerminatedString(field_ptr_extra.field_name_start), .no_embedded_nulls, ); @@ -4942,6 +4945,7 @@ fn validateStructInit( struct_ptr_zir_ref = field_ptr_extra.lhs; const field_name = try ip.getOrPutString( gpa, + pt.tid, sema.code.nullTerminatedString(field_ptr_extra.field_name_start), .no_embedded_nulls, ); @@ -5518,10 +5522,11 @@ fn failWithBadStructFieldAccess( field_src: LazySrcLoc, field_name: InternPool.NullTerminatedString, ) CompileError { - const zcu = sema.pt.zcu; + const pt = sema.pt; + const zcu = pt.zcu; const ip = &zcu.intern_pool; const decl = zcu.declPtr(struct_type.decl.unwrap().?); - const fqn = try decl.fullyQualifiedName(zcu); + const fqn = try decl.fullyQualifiedName(pt); const msg = msg: { const msg = try sema.errMsg( @@ -5544,12 +5549,13 @@ fn failWithBadUnionFieldAccess( field_src: LazySrcLoc, field_name: InternPool.NullTerminatedString, ) CompileError { - const zcu = sema.pt.zcu; + const pt = sema.pt; + const zcu = pt.zcu; const ip = &zcu.intern_pool; const gpa = sema.gpa; const decl = zcu.declPtr(union_obj.decl); - const fqn = try decl.fullyQualifiedName(zcu); + const fqn = try decl.fullyQualifiedName(pt); const msg = msg: { const msg = try sema.errMsg( @@ -5715,7 +5721,7 @@ fn zirStoreNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!v fn zirStr(sema: *Sema, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const bytes = sema.code.instructions.items(.data)[@intFromEnum(inst)].str.get(sema.code); return sema.addStrLit( - try sema.pt.zcu.intern_pool.getOrPutString(sema.gpa, bytes, .maybe_embedded_nulls), + try sema.pt.zcu.intern_pool.getOrPutString(sema.gpa, sema.pt.tid, bytes, .maybe_embedded_nulls), bytes.len, ); } @@ -6057,7 +6063,7 @@ fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileEr const path_digest = zcu.filePathDigest(result.file_index); const root_decl = zcu.fileRootDecl(result.file_index); - zcu.astGenFile(result.file, result.file_index, path_digest, root_decl) catch |err| + pt.astGenFile(result.file, result.file_index, path_digest, root_decl) catch |err| return sema.fail(&child_block, src, "C import failed: {s}", .{@errorName(err)}); try pt.ensureFileAnalyzed(result.file_index); @@ -6418,6 +6424,7 @@ fn zirExport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void const options_src = block.builtinCallArgSrc(inst_data.src_node, 1); const decl_name = try mod.intern_pool.getOrPutString( mod.gpa, + pt.tid, sema.code.nullTerminatedString(extra.decl_name), .no_embedded_nulls, ); @@ -6737,6 +6744,7 @@ fn zirDeclRef(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const src = block.tokenOffset(inst_data.src_tok); const decl_name = try mod.intern_pool.getOrPutString( sema.gpa, + pt.tid, inst_data.get(sema.code), .no_embedded_nulls, ); @@ -6751,6 +6759,7 @@ fn zirDeclVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const src = 
block.tokenOffset(inst_data.src_tok); const decl_name = try mod.intern_pool.getOrPutString( sema.gpa, + pt.tid, inst_data.get(sema.code), .no_embedded_nulls, ); @@ -6907,7 +6916,7 @@ pub fn analyzeSaveErrRetIndex(sema: *Sema, block: *Block) SemaError!Air.Inst.Ref const stack_trace_ty = try pt.getBuiltinType("StackTrace"); try stack_trace_ty.resolveFields(pt); - const field_name = try mod.intern_pool.getOrPutString(gpa, "index", .no_embedded_nulls); + const field_name = try mod.intern_pool.getOrPutString(gpa, pt.tid, "index", .no_embedded_nulls); const field_index = sema.structFieldIndex(block, stack_trace_ty, field_name, LazySrcLoc.unneeded) catch |err| switch (err) { error.AnalysisFail => @panic("std.builtin.StackTrace is corrupt"), error.GenericPoison, error.ComptimeReturn, error.ComptimeBreak => unreachable, @@ -6951,7 +6960,7 @@ fn popErrorReturnTrace( try stack_trace_ty.resolveFields(pt); const ptr_stack_trace_ty = try pt.singleMutPtrType(stack_trace_ty); const err_return_trace = try block.addTy(.err_return_trace, ptr_stack_trace_ty); - const field_name = try mod.intern_pool.getOrPutString(gpa, "index", .no_embedded_nulls); + const field_name = try mod.intern_pool.getOrPutString(gpa, pt.tid, "index", .no_embedded_nulls); const field_ptr = try sema.structFieldPtr(block, src, err_return_trace, field_name, src, stack_trace_ty, true); try sema.storePtr2(block, src, field_ptr, src, saved_error_trace_index, src, .store); } else if (is_non_error == null) { @@ -6977,7 +6986,7 @@ fn popErrorReturnTrace( try stack_trace_ty.resolveFields(pt); const ptr_stack_trace_ty = try pt.singleMutPtrType(stack_trace_ty); const err_return_trace = try then_block.addTy(.err_return_trace, ptr_stack_trace_ty); - const field_name = try mod.intern_pool.getOrPutString(gpa, "index", .no_embedded_nulls); + const field_name = try mod.intern_pool.getOrPutString(gpa, pt.tid, "index", .no_embedded_nulls); const field_ptr = try sema.structFieldPtr(&then_block, src, err_return_trace, field_name, src, stack_trace_ty, true); try sema.storePtr2(&then_block, src, field_ptr, src, saved_error_trace_index, src, .store); _ = try then_block.addBr(cond_block_inst, .void_value); @@ -7038,6 +7047,7 @@ fn zirCall( const object_ptr = try sema.resolveInst(extra.data.obj_ptr); const field_name = try mod.intern_pool.getOrPutString( sema.gpa, + pt.tid, sema.code.nullTerminatedString(extra.data.field_name_start), .no_embedded_nulls, ); @@ -7103,7 +7113,7 @@ fn zirCall( if (input_is_error or (pop_error_return_trace and return_ty.isError(mod))) { const stack_trace_ty = try pt.getBuiltinType("StackTrace"); try stack_trace_ty.resolveFields(pt); - const field_name = try mod.intern_pool.getOrPutString(sema.gpa, "index", .no_embedded_nulls); + const field_name = try mod.intern_pool.getOrPutString(sema.gpa, pt.tid, "index", .no_embedded_nulls); const field_index = try sema.structFieldIndex(block, stack_trace_ty, field_name, call_src); // Insert a save instruction before the arg resolution + call instructions we just generated @@ -8687,6 +8697,7 @@ fn zirErrorValue(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! 
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].str_tok; const name = try pt.zcu.intern_pool.getOrPutString( sema.gpa, + pt.tid, inst_data.get(sema.code), .no_embedded_nulls, ); @@ -8849,7 +8860,7 @@ fn zirEnumLiteral(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].str_tok; const name = inst_data.get(sema.code); return Air.internedToRef((try pt.intern(.{ - .enum_literal = try mod.intern_pool.getOrPutString(sema.gpa, name, .no_embedded_nulls), + .enum_literal = try mod.intern_pool.getOrPutString(sema.gpa, pt.tid, name, .no_embedded_nulls), }))); } @@ -9820,7 +9831,7 @@ fn funcCommon( const func_index = try ip.getExternFunc(gpa, pt.tid, .{ .ty = func_ty, .decl = sema.owner_decl_index, - .lib_name = try mod.intern_pool.getOrPutStringOpt(gpa, opt_lib_name, .no_embedded_nulls), + .lib_name = try mod.intern_pool.getOrPutStringOpt(gpa, pt.tid, opt_lib_name, .no_embedded_nulls), }); return finishFunc( sema, @@ -10281,6 +10292,7 @@ fn zirFieldVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const extra = sema.code.extraData(Zir.Inst.Field, inst_data.payload_index).data; const field_name = try mod.intern_pool.getOrPutString( sema.gpa, + pt.tid, sema.code.nullTerminatedString(extra.field_name_start), .no_embedded_nulls, ); @@ -10300,6 +10312,7 @@ fn zirFieldPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const extra = sema.code.extraData(Zir.Inst.Field, inst_data.payload_index).data; const field_name = try mod.intern_pool.getOrPutString( sema.gpa, + pt.tid, sema.code.nullTerminatedString(extra.field_name_start), .no_embedded_nulls, ); @@ -10319,6 +10332,7 @@ fn zirStructInitFieldPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compi const extra = sema.code.extraData(Zir.Inst.Field, inst_data.payload_index).data; const field_name = try mod.intern_pool.getOrPutString( sema.gpa, + pt.tid, sema.code.nullTerminatedString(extra.field_name_start), .no_embedded_nulls, ); @@ -13983,6 +13997,7 @@ fn zirRetErrValueCode(sema: *Sema, inst: Zir.Inst.Index) CompileError!Air.Inst.R const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].str_tok; const name = try mod.intern_pool.getOrPutString( sema.gpa, + pt.tid, inst_data.get(sema.code), .no_embedded_nulls, ); @@ -17716,7 +17731,7 @@ fn zirBuiltinSrc( .val = try pt.intern(.{ .aggregate = .{ .ty = array_ty, .storage = .{ - .bytes = try ip.getOrPutString(gpa, file_name, .maybe_embedded_nulls), + .bytes = try ip.getOrPutString(gpa, pt.tid, file_name, .maybe_embedded_nulls), }, } }), } }, @@ -17778,7 +17793,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod), - try ip.getOrPutString(gpa, "Fn", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "Fn", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(fn_info_decl_index); const fn_info_decl = mod.declPtr(fn_info_decl_index); @@ -17788,7 +17803,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, fn_info_ty.getNamespaceIndex(mod), - try ip.getOrPutString(gpa, "Param", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "Param", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(param_info_decl_index); const param_info_decl = mod.declPtr(param_info_decl_index); @@ -17890,7 +17905,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, 
type_info_ty.getNamespaceIndex(mod), - try ip.getOrPutString(gpa, "Int", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "Int", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(int_info_decl_index); const int_info_decl = mod.declPtr(int_info_decl_index); @@ -17918,7 +17933,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod), - try ip.getOrPutString(gpa, "Float", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "Float", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(float_info_decl_index); const float_info_decl = mod.declPtr(float_info_decl_index); @@ -17950,7 +17965,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, (try pt.getBuiltinType("Type")).getNamespaceIndex(mod), - try ip.getOrPutString(gpa, "Pointer", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "Pointer", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(decl_index); const decl = mod.declPtr(decl_index); @@ -17961,7 +17976,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, pointer_ty.getNamespaceIndex(mod), - try ip.getOrPutString(gpa, "Size", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "Size", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(decl_index); const decl = mod.declPtr(decl_index); @@ -18004,7 +18019,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod), - try ip.getOrPutString(gpa, "Array", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "Array", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(array_field_ty_decl_index); const array_field_ty_decl = mod.declPtr(array_field_ty_decl_index); @@ -18035,7 +18050,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod), - try ip.getOrPutString(gpa, "Vector", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "Vector", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(vector_field_ty_decl_index); const vector_field_ty_decl = mod.declPtr(vector_field_ty_decl_index); @@ -18064,7 +18079,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod), - try ip.getOrPutString(gpa, "Optional", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "Optional", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(optional_field_ty_decl_index); const optional_field_ty_decl = mod.declPtr(optional_field_ty_decl_index); @@ -18091,7 +18106,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod), - try ip.getOrPutString(gpa, "Error", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "Error", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(set_field_ty_decl_index); const set_field_ty_decl = mod.declPtr(set_field_ty_decl_index); @@ -18197,7 +18212,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod), - try ip.getOrPutString(gpa, "ErrorUnion", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "ErrorUnion", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(error_union_field_ty_decl_index); const error_union_field_ty_decl = mod.declPtr(error_union_field_ty_decl_index); @@ -18227,7 +18242,7 @@ fn zirTypeInfo(sema: 
*Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod), - try ip.getOrPutString(gpa, "EnumField", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "EnumField", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(enum_field_ty_decl_index); const enum_field_ty_decl = mod.declPtr(enum_field_ty_decl_index); @@ -18324,7 +18339,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod), - try ip.getOrPutString(gpa, "Enum", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "Enum", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(type_enum_ty_decl_index); const type_enum_ty_decl = mod.declPtr(type_enum_ty_decl_index); @@ -18356,7 +18371,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod), - try ip.getOrPutString(gpa, "Union", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "Union", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(type_union_ty_decl_index); const type_union_ty_decl = mod.declPtr(type_union_ty_decl_index); @@ -18368,7 +18383,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod), - try ip.getOrPutString(gpa, "UnionField", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "UnionField", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(union_field_ty_decl_index); const union_field_ty_decl = mod.declPtr(union_field_ty_decl_index); @@ -18473,7 +18488,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, (try pt.getBuiltinType("Type")).getNamespaceIndex(mod), - try ip.getOrPutString(gpa, "ContainerLayout", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "ContainerLayout", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(decl_index); const decl = mod.declPtr(decl_index); @@ -18506,7 +18521,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod), - try ip.getOrPutString(gpa, "Struct", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "Struct", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(type_struct_ty_decl_index); const type_struct_ty_decl = mod.declPtr(type_struct_ty_decl_index); @@ -18518,7 +18533,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod), - try ip.getOrPutString(gpa, "StructField", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "StructField", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(struct_field_ty_decl_index); const struct_field_ty_decl = mod.declPtr(struct_field_ty_decl_index); @@ -18540,7 +18555,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const field_name = if (anon_struct_type.names.len != 0) anon_struct_type.names.get(ip)[field_index] else - try ip.getOrPutStringFmt(gpa, "{d}", .{field_index}, .no_embedded_nulls); + try ip.getOrPutStringFmt(gpa, pt.tid, "{d}", .{field_index}, .no_embedded_nulls); const field_name_len = field_name.length(ip); const new_decl_ty = try pt.arrayType(.{ .len = field_name_len, @@ -18600,7 +18615,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const field_name = if (struct_type.fieldName(ip, field_index).unwrap()) |field_name| field_name else - try 
ip.getOrPutStringFmt(gpa, "{d}", .{field_index}, .no_embedded_nulls); + try ip.getOrPutStringFmt(gpa, pt.tid, "{d}", .{field_index}, .no_embedded_nulls); const field_name_len = field_name.length(ip); const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]); const field_init = struct_type.fieldInit(ip, field_index); @@ -18706,7 +18721,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, (try pt.getBuiltinType("Type")).getNamespaceIndex(mod), - try ip.getOrPutString(gpa, "ContainerLayout", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "ContainerLayout", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(decl_index); const decl = mod.declPtr(decl_index); @@ -18742,7 +18757,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod), - try ip.getOrPutString(gpa, "Opaque", .no_embedded_nulls), + try ip.getOrPutString(gpa, pt.tid, "Opaque", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(type_opaque_ty_decl_index); const type_opaque_ty_decl = mod.declPtr(type_opaque_ty_decl_index); @@ -18786,7 +18801,7 @@ fn typeInfoDecls( block, src, type_info_ty.getNamespaceIndex(mod), - try mod.intern_pool.getOrPutString(gpa, "Declaration", .no_embedded_nulls), + try mod.intern_pool.getOrPutString(gpa, pt.tid, "Declaration", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(declaration_ty_decl_index); const declaration_ty_decl = mod.declPtr(declaration_ty_decl_index); @@ -19541,6 +19556,7 @@ fn zirRetErrValue( const src = block.tokenOffset(inst_data.src_tok); const err_name = try mod.intern_pool.getOrPutString( sema.gpa, + pt.tid, inst_data.get(sema.code), .no_embedded_nulls, ); @@ -20251,6 +20267,7 @@ fn zirStructInit( const field_type_extra = sema.code.extraData(Zir.Inst.FieldType, field_type_data.payload_index).data; const field_name = try ip.getOrPutString( gpa, + pt.tid, sema.code.nullTerminatedString(field_type_extra.name_start), .no_embedded_nulls, ); @@ -20292,6 +20309,7 @@ fn zirStructInit( const field_type_extra = sema.code.extraData(Zir.Inst.FieldType, field_type_data.payload_index).data; const field_name = try ip.getOrPutString( gpa, + pt.tid, sema.code.nullTerminatedString(field_type_extra.name_start), .no_embedded_nulls, ); @@ -20581,7 +20599,7 @@ fn structInitAnon( }, }; - field_name.* = try mod.intern_pool.getOrPutString(gpa, name, .no_embedded_nulls); + field_name.* = try mod.intern_pool.getOrPutString(gpa, pt.tid, name, .no_embedded_nulls); const init = try sema.resolveInst(item.data.init); field_ty.* = sema.typeOf(init).toIntern(); @@ -20958,7 +20976,7 @@ fn zirStructInitFieldType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Comp }; const aggregate_ty = wrapped_aggregate_ty.optEuBaseType(mod); const zir_field_name = sema.code.nullTerminatedString(extra.name_start); - const field_name = try ip.getOrPutString(sema.gpa, zir_field_name, .no_embedded_nulls); + const field_name = try ip.getOrPutString(sema.gpa, pt.tid, zir_field_name, .no_embedded_nulls); return sema.fieldType(block, aggregate_ty, field_name, field_name_src, ty_src); } @@ -21344,11 +21362,11 @@ fn zirReify( const struct_type = ip.loadStructType(ip.typeOf(union_val.val)); const signedness_val = try Value.fromInterned(union_val.val).fieldValue( pt, - struct_type.nameIndex(ip, try ip.getOrPutString(gpa, "signedness", .no_embedded_nulls)).?, + struct_type.nameIndex(ip, try ip.getOrPutString(gpa, pt.tid, "signedness", .no_embedded_nulls)).?, ); const bits_val = 
try Value.fromInterned(union_val.val).fieldValue(
                 pt,
-                struct_type.nameIndex(ip, try ip.getOrPutString(gpa, "bits", .no_embedded_nulls)).?,
+                struct_type.nameIndex(ip, try ip.getOrPutString(gpa, pt.tid, "bits", .no_embedded_nulls)).?,
             );
 
             const signedness = mod.toEnum(std.builtin.Signedness, signedness_val);
@@ -21360,11 +21378,11 @@ fn zirReify(
             const struct_type = ip.loadStructType(ip.typeOf(union_val.val));
             const len_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
                 ip,
-                try ip.getOrPutString(gpa, "len", .no_embedded_nulls),
+                try ip.getOrPutString(gpa, pt.tid, "len", .no_embedded_nulls),
             ).?);
             const child_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
                 ip,
-                try ip.getOrPutString(gpa, "child", .no_embedded_nulls),
+                try ip.getOrPutString(gpa, pt.tid, "child", .no_embedded_nulls),
             ).?);
 
             const len: u32 = @intCast(try len_val.toUnsignedIntSema(pt));
@@ -21382,7 +21400,7 @@ fn zirReify(
             const struct_type = ip.loadStructType(ip.typeOf(union_val.val));
             const bits_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
                 ip,
-                try ip.getOrPutString(gpa, "bits", .no_embedded_nulls),
+                try ip.getOrPutString(gpa, pt.tid, "bits", .no_embedded_nulls),
             ).?);
 
             const bits: u16 = @intCast(try bits_val.toUnsignedIntSema(pt));
@@ -21400,35 +21418,35 @@ fn zirReify(
             const struct_type = ip.loadStructType(ip.typeOf(union_val.val));
             const size_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
                 ip,
-                try ip.getOrPutString(gpa, "size", .no_embedded_nulls),
+                try ip.getOrPutString(gpa, pt.tid, "size", .no_embedded_nulls),
             ).?);
             const is_const_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
                 ip,
-                try ip.getOrPutString(gpa, "is_const", .no_embedded_nulls),
+                try ip.getOrPutString(gpa, pt.tid, "is_const", .no_embedded_nulls),
             ).?);
             const is_volatile_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
                 ip,
-                try ip.getOrPutString(gpa, "is_volatile", .no_embedded_nulls),
+                try ip.getOrPutString(gpa, pt.tid, "is_volatile", .no_embedded_nulls),
             ).?);
             const alignment_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
                 ip,
-                try ip.getOrPutString(gpa, "alignment", .no_embedded_nulls),
+                try ip.getOrPutString(gpa, pt.tid, "alignment", .no_embedded_nulls),
             ).?);
             const address_space_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
                 ip,
-                try ip.getOrPutString(gpa, "address_space", .no_embedded_nulls),
+                try ip.getOrPutString(gpa, pt.tid, "address_space", .no_embedded_nulls),
             ).?);
             const child_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
                 ip,
-                try ip.getOrPutString(gpa, "child", .no_embedded_nulls),
+                try ip.getOrPutString(gpa, pt.tid, "child", .no_embedded_nulls),
             ).?);
             const is_allowzero_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
                 ip,
-                try ip.getOrPutString(gpa, "is_allowzero", .no_embedded_nulls),
+                try ip.getOrPutString(gpa, pt.tid, "is_allowzero", .no_embedded_nulls),
             ).?);
             const sentinel_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
                 ip,
-                try ip.getOrPutString(gpa, "sentinel", .no_embedded_nulls),
+                try ip.getOrPutString(gpa, pt.tid, "sentinel", .no_embedded_nulls),
             ).?);
 
             if (!try sema.intFitsInType(alignment_val, Type.u32, null)) {
@@ -21505,15 +21523,15 @@ fn zirReify(
             const struct_type = ip.loadStructType(ip.typeOf(union_val.val));
             const len_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
                 ip,
-                try ip.getOrPutString(gpa, "len", .no_embedded_nulls),
+                try ip.getOrPutString(gpa, pt.tid, "len", .no_embedded_nulls),
             ).?);
             const child_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
                 ip,
-                try ip.getOrPutString(gpa, "child", .no_embedded_nulls),
+                try ip.getOrPutString(gpa, pt.tid, "child", .no_embedded_nulls),
             ).?);
             const sentinel_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
                 ip,
-                try ip.getOrPutString(gpa, "sentinel", .no_embedded_nulls),
+                try ip.getOrPutString(gpa, pt.tid, "sentinel", .no_embedded_nulls),
             ).?);
 
             const len = try len_val.toUnsignedIntSema(pt);
@@ -21534,7 +21552,7 @@ fn zirReify(
             const struct_type = ip.loadStructType(ip.typeOf(union_val.val));
             const child_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
                 ip,
-                try ip.getOrPutString(gpa, "child", .no_embedded_nulls),
+                try ip.getOrPutString(gpa, pt.tid, "child", .no_embedded_nulls),
             ).?);
 
             const child_ty = child_val.toType();
@@ -21546,11 +21564,11 @@ fn zirReify(
             const struct_type = ip.loadStructType(ip.typeOf(union_val.val));
             const error_set_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
                 ip,
-                try ip.getOrPutString(gpa, "error_set", .no_embedded_nulls),
+                try ip.getOrPutString(gpa, pt.tid, "error_set", .no_embedded_nulls),
             ).?);
             const payload_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
                 ip,
-                try ip.getOrPutString(gpa, "payload", .no_embedded_nulls),
+                try ip.getOrPutString(gpa, pt.tid, "payload", .no_embedded_nulls),
             ).?);
 
             const error_set_ty = error_set_val.toType();
@@ -21579,7 +21597,7 @@ fn zirReify(
                 const elem_struct_type = ip.loadStructType(ip.typeOf(elem_val.toIntern()));
                 const name_val = try elem_val.fieldValue(pt, elem_struct_type.nameIndex(
                     ip,
-                    try ip.getOrPutString(gpa, "name", .no_embedded_nulls),
+                    try ip.getOrPutString(gpa, pt.tid, "name", .no_embedded_nulls),
                 ).?);
 
                 const name = try sema.sliceToIpString(block, src, name_val, .{
@@ -21601,23 +21619,23 @@ fn zirReify(
             const struct_type = ip.loadStructType(ip.typeOf(union_val.val));
             const layout_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
                 ip,
-                try ip.getOrPutString(gpa, "layout", .no_embedded_nulls),
+                try ip.getOrPutString(gpa, pt.tid, "layout", .no_embedded_nulls),
             ).?);
             const backing_integer_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
                 ip,
-                try ip.getOrPutString(gpa, "backing_integer", .no_embedded_nulls),
+                try ip.getOrPutString(gpa, pt.tid, "backing_integer", .no_embedded_nulls),
             ).?);
             const fields_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
                 ip,
-                try ip.getOrPutString(gpa, "fields", .no_embedded_nulls),
+                try ip.getOrPutString(gpa, pt.tid, "fields", .no_embedded_nulls),
             ).?);
             const decls_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
                 ip,
-                try ip.getOrPutString(gpa, "decls", .no_embedded_nulls),
+                try ip.getOrPutString(gpa, pt.tid, "decls", .no_embedded_nulls),
             ).?);
             const is_tuple_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
                 ip,
-                try ip.getOrPutString(gpa, "is_tuple", .no_embedded_nulls),
+                try ip.getOrPutString(gpa, pt.tid, "is_tuple", .no_embedded_nulls),
             ).?);
 
             const layout = mod.toEnum(std.builtin.Type.ContainerLayout, layout_val);
@@ -21641,19 +21659,19 @@ fn zirReify(
             const struct_type = ip.loadStructType(ip.typeOf(union_val.val));
             const tag_type_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
                 ip,
-                try ip.getOrPutString(gpa, "tag_type", .no_embedded_nulls),
+                try ip.getOrPutString(gpa, pt.tid, "tag_type", .no_embedded_nulls),
             ).?);
             const fields_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
                 ip,
-                try ip.getOrPutString(gpa, "fields", .no_embedded_nulls),
+                try ip.getOrPutString(gpa, pt.tid, "fields", .no_embedded_nulls),
             ).?);
             const decls_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
                 ip,
-                try ip.getOrPutString(gpa, "decls", .no_embedded_nulls),
+                try ip.getOrPutString(gpa, pt.tid, "decls", .no_embedded_nulls),
             ).?);
             const is_exhaustive_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
                 ip,
-                try ip.getOrPutString(gpa, "is_exhaustive", .no_embedded_nulls),
+                try ip.getOrPutString(gpa, pt.tid, "is_exhaustive", .no_embedded_nulls),
             ).?);
 
             if (try decls_val.sliceLen(pt) > 0) {
@@ -21670,7 +21688,7 @@ fn zirReify(
             const struct_type = ip.loadStructType(ip.typeOf(union_val.val));
             const decls_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
                 ip,
-                try ip.getOrPutString(gpa, "decls", .no_embedded_nulls),
+                try ip.getOrPutString(gpa, pt.tid, "decls", .no_embedded_nulls),
             ).?);
 
             // Decls
@@ -21707,19 +21725,19 @@ fn zirReify(
             const struct_type = ip.loadStructType(ip.typeOf(union_val.val));
             const layout_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
                 ip,
-                try ip.getOrPutString(gpa, "layout", .no_embedded_nulls),
+                try ip.getOrPutString(gpa, pt.tid, "layout", .no_embedded_nulls),
             ).?);
             const tag_type_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
                 ip,
-                try ip.getOrPutString(gpa, "tag_type", .no_embedded_nulls),
+                try ip.getOrPutString(gpa, pt.tid, "tag_type", .no_embedded_nulls),
             ).?);
             const fields_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
                 ip,
-                try ip.getOrPutString(gpa, "fields", .no_embedded_nulls),
+                try ip.getOrPutString(gpa, pt.tid, "fields", .no_embedded_nulls),
             ).?);
             const decls_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
                 ip,
-                try ip.getOrPutString(gpa, "decls", .no_embedded_nulls),
+                try ip.getOrPutString(gpa, pt.tid, "decls", .no_embedded_nulls),
             ).?);
 
             if (try decls_val.sliceLen(pt) > 0) {
@@ -21737,23 +21755,23 @@ fn zirReify(
             const struct_type = ip.loadStructType(ip.typeOf(union_val.val));
             const calling_convention_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
                 ip,
-                try ip.getOrPutString(gpa, "calling_convention", .no_embedded_nulls),
+                try ip.getOrPutString(gpa, pt.tid, "calling_convention", .no_embedded_nulls),
             ).?);
             const is_generic_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
                 ip,
-                try ip.getOrPutString(gpa, "is_generic", .no_embedded_nulls),
+                try ip.getOrPutString(gpa, pt.tid, "is_generic", .no_embedded_nulls),
             ).?);
             const is_var_args_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
                 ip,
-                try ip.getOrPutString(gpa, "is_var_args", .no_embedded_nulls),
+                try ip.getOrPutString(gpa, pt.tid, "is_var_args", .no_embedded_nulls),
             ).?);
             const return_type_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
                 ip,
-                try ip.getOrPutString(gpa, "return_type", .no_embedded_nulls),
+                try ip.getOrPutString(gpa, pt.tid, "return_type", .no_embedded_nulls),
             ).?);
             const params_slice_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
                 ip,
-                try ip.getOrPutString(gpa, "params", .no_embedded_nulls),
+                try ip.getOrPutString(gpa, pt.tid, "params", .no_embedded_nulls),
             ).?);
 
             const is_generic = is_generic_val.toBool();
@@ -21783,15 +21801,15 @@ fn zirReify(
                 const elem_struct_type = ip.loadStructType(ip.typeOf(elem_val.toIntern()));
                 const param_is_generic_val = try elem_val.fieldValue(pt, elem_struct_type.nameIndex(
                     ip,
-                    try ip.getOrPutString(gpa, "is_generic", .no_embedded_nulls),
+                    try ip.getOrPutString(gpa, pt.tid, "is_generic", .no_embedded_nulls),
                 ).?);
                 const param_is_noalias_val = try elem_val.fieldValue(pt, elem_struct_type.nameIndex(
                     ip,
-                    try ip.getOrPutString(gpa, "is_noalias", .no_embedded_nulls),
+                    try ip.getOrPutString(gpa, pt.tid, "is_noalias", .no_embedded_nulls),
                 ).?);
                 const opt_param_type_val = try elem_val.fieldValue(pt, elem_struct_type.nameIndex(
                     ip,
-                    try ip.getOrPutString(gpa, "type", .no_embedded_nulls),
+                    try ip.getOrPutString(gpa, pt.tid, "type", .no_embedded_nulls),
                 ).?);
 
                 if (param_is_generic_val.toBool()) {
@@ -22535,7 +22553,7 @@ fn zirTypeName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     const ty_src = block.builtinCallArgSrc(inst_data.src_node, 0);
     const ty = try sema.resolveType(block, ty_src, inst_data.operand);
 
-    const type_name = try ip.getOrPutStringFmt(sema.gpa, "{}", .{ty.fmt(pt)}, .no_embedded_nulls);
+    const type_name = try ip.getOrPutStringFmt(sema.gpa, pt.tid, "{}", .{ty.fmt(pt)}, .no_embedded_nulls);
     return sema.addNullTerminatedStrLit(type_name);
 }
 
@@ -24143,18 +24161,18 @@ fn resolveExportOptions(
     const section_src = block.src(.{ .init_field_section = src.offset.node_offset_builtin_call_arg.builtin_call_node });
     const visibility_src = block.src(.{ .init_field_visibility = src.offset.node_offset_builtin_call_arg.builtin_call_node });
 
-    const name_operand = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "name", .no_embedded_nulls), name_src);
+    const name_operand = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, pt.tid, "name", .no_embedded_nulls), name_src);
     const name = try sema.toConstString(block, name_src, name_operand, .{
         .needed_comptime_reason = "name of exported value must be comptime-known",
     });
 
-    const linkage_operand = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "linkage", .no_embedded_nulls), linkage_src);
+    const linkage_operand = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, pt.tid, "linkage", .no_embedded_nulls), linkage_src);
     const linkage_val = try sema.resolveConstDefinedValue(block, linkage_src, linkage_operand, .{
         .needed_comptime_reason = "linkage of exported value must be comptime-known",
     });
     const linkage = mod.toEnum(std.builtin.GlobalLinkage, linkage_val);
 
-    const section_operand = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "section", .no_embedded_nulls), section_src);
+    const section_operand = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, pt.tid, "section", .no_embedded_nulls), section_src);
     const section_opt_val = try sema.resolveConstDefinedValue(block, section_src, section_operand, .{
         .needed_comptime_reason = "linksection of exported value must be comptime-known",
     });
@@ -24165,7 +24183,7 @@ fn resolveExportOptions(
     else
         null;
 
-    const visibility_operand = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "visibility", .no_embedded_nulls), visibility_src);
+    const visibility_operand = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, pt.tid, "visibility", .no_embedded_nulls), visibility_src);
     const visibility_val = try sema.resolveConstDefinedValue(block, visibility_src, visibility_operand, .{
         .needed_comptime_reason = "visibility of exported value must be comptime-known",
     });
@@ -24182,9 +24200,9 @@ fn resolveExportOptions(
     }
 
     return .{
-        .name = try ip.getOrPutString(gpa, name, .no_embedded_nulls),
+        .name = try ip.getOrPutString(gpa, pt.tid, name, .no_embedded_nulls),
         .linkage = linkage,
-        .section = try ip.getOrPutStringOpt(gpa, section, .no_embedded_nulls),
+        .section = try ip.getOrPutStringOpt(gpa, pt.tid, section, .no_embedded_nulls),
         .visibility = visibility,
     };
 }
@@ -25821,7 +25839,7 @@ fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
     const runtime_src = rs: {
         const ptr_val = try sema.resolveDefinedValue(block, dest_src, dest_ptr) orelse break :rs dest_src;
-        const len_air_ref = try sema.fieldVal(block, src, dest_ptr, try ip.getOrPutString(gpa, "len", .no_embedded_nulls), dest_src);
+        const len_air_ref = try sema.fieldVal(block, src, dest_ptr, try ip.getOrPutString(gpa, pt.tid, "len", .no_embedded_nulls), dest_src);
         const len_val = (try sema.resolveDefinedValue(block, dest_src, len_air_ref)) orelse break :rs dest_src;
         const len_u64 = (try len_val.getUnsignedIntAdvanced(pt, .sema)).?;
         const len = try sema.usizeCast(block, dest_src, len_u64);
@@ -25952,7 +25970,7 @@ fn zirVarExtended(
         .ty = var_ty.toIntern(),
         .init = init_val,
         .decl = sema.owner_decl_index,
-        .lib_name = try mod.intern_pool.getOrPutStringOpt(sema.gpa, lib_name, .no_embedded_nulls),
+        .lib_name = try mod.intern_pool.getOrPutStringOpt(sema.gpa, pt.tid, lib_name, .no_embedded_nulls),
         .is_extern = small.is_extern,
         .is_const = small.is_const,
         .is_threadlocal = small.is_threadlocal,
@@ -26323,17 +26341,17 @@ fn resolvePrefetchOptions(
     const locality_src = block.src(.{ .init_field_locality = src.offset.node_offset_builtin_call_arg.builtin_call_node });
     const cache_src = block.src(.{ .init_field_cache = src.offset.node_offset_builtin_call_arg.builtin_call_node });
 
-    const rw = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "rw", .no_embedded_nulls), rw_src);
+    const rw = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, pt.tid, "rw", .no_embedded_nulls), rw_src);
     const rw_val = try sema.resolveConstDefinedValue(block, rw_src, rw, .{
         .needed_comptime_reason = "prefetch read/write must be comptime-known",
     });
 
-    const locality = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "locality", .no_embedded_nulls), locality_src);
+    const locality = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, pt.tid, "locality", .no_embedded_nulls), locality_src);
     const locality_val = try sema.resolveConstDefinedValue(block, locality_src, locality, .{
         .needed_comptime_reason = "prefetch locality must be comptime-known",
     });
 
-    const cache = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "cache", .no_embedded_nulls), cache_src);
+    const cache = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, pt.tid, "cache", .no_embedded_nulls), cache_src);
     const cache_val = try sema.resolveConstDefinedValue(block, cache_src, cache, .{
         .needed_comptime_reason = "prefetch cache must be comptime-known",
     });
@@ -26397,23 +26415,23 @@ fn resolveExternOptions(
     const linkage_src = block.src(.{ .init_field_linkage = src.offset.node_offset_builtin_call_arg.builtin_call_node });
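Every Sema.zig hunk above makes the same mechanical change: `InternPool` string interning now takes the identity of the calling thread alongside the allocator. A minimal sketch of the post-patch call shape, assuming a `pt: Zcu.PerThread` in scope (`reifyFieldName` is a hypothetical helper, not part of the patch):

```zig
// Hypothetical helper showing the new interning call shape. `pt` bundles the
// `*Zcu` with a thread id; passing `pt.tid` lets the intern pool attribute
// the write to the calling analysis thread.
fn reifyFieldName(
    sema: *Sema,
    pt: Zcu.PerThread,
    name: []const u8,
) !InternPool.NullTerminatedString {
    const ip = &pt.zcu.intern_pool;
    // Pre-patch: ip.getOrPutString(sema.gpa, name, .no_embedded_nulls)
    return ip.getOrPutString(sema.gpa, pt.tid, name, .no_embedded_nulls);
}
```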
     const thread_local_src = block.src(.{ .init_field_thread_local = src.offset.node_offset_builtin_call_arg.builtin_call_node });
 
-    const name_ref = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "name", .no_embedded_nulls), name_src);
+    const name_ref = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, pt.tid, "name", .no_embedded_nulls), name_src);
     const name = try sema.toConstString(block, name_src, name_ref, .{
         .needed_comptime_reason = "name of the extern symbol must be comptime-known",
     });
 
-    const library_name_inst = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "library_name", .no_embedded_nulls), library_src);
+    const library_name_inst = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, pt.tid, "library_name", .no_embedded_nulls), library_src);
     const library_name_val = try sema.resolveConstDefinedValue(block, library_src, library_name_inst, .{
         .needed_comptime_reason = "library in which extern symbol is must be comptime-known",
     });
 
-    const linkage_ref = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "linkage", .no_embedded_nulls), linkage_src);
+    const linkage_ref = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, pt.tid, "linkage", .no_embedded_nulls), linkage_src);
     const linkage_val = try sema.resolveConstDefinedValue(block, linkage_src, linkage_ref, .{
         .needed_comptime_reason = "linkage of the extern symbol must be comptime-known",
     });
     const linkage = mod.toEnum(std.builtin.GlobalLinkage, linkage_val);
 
-    const is_thread_local = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "is_thread_local", .no_embedded_nulls), thread_local_src);
+    const is_thread_local = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, pt.tid, "is_thread_local", .no_embedded_nulls), thread_local_src);
     const is_thread_local_val = try sema.resolveConstDefinedValue(block, thread_local_src, is_thread_local, .{
         .needed_comptime_reason = "threadlocality of the extern symbol must be comptime-known",
     });
@@ -26438,8 +26456,8 @@ fn resolveExternOptions(
     }
 
     return .{
-        .name = try ip.getOrPutString(gpa, name, .no_embedded_nulls),
-        .library_name = try ip.getOrPutStringOpt(gpa, library_name, .no_embedded_nulls),
+        .name = try ip.getOrPutString(gpa, pt.tid, name, .no_embedded_nulls),
+        .library_name = try ip.getOrPutStringOpt(gpa, pt.tid, library_name, .no_embedded_nulls),
         .linkage = linkage,
         .is_thread_local = is_thread_local_val.toBool(),
     };
@@ -27052,7 +27070,7 @@ fn preparePanicId(sema: *Sema, block: *Block, panic_id: Module.PanicId) !InternP
         block,
         LazySrcLoc.unneeded,
         panic_messages_ty.getNamespaceIndex(mod),
-        try mod.intern_pool.getOrPutString(gpa, @tagName(panic_id), .no_embedded_nulls),
+        try mod.intern_pool.getOrPutString(gpa, pt.tid, @tagName(panic_id), .no_embedded_nulls),
     ) catch |err| switch (err) {
         error.AnalysisFail => @panic("std.builtin.panic_messages is corrupt"),
         error.GenericPoison, error.ComptimeReturn, error.ComptimeBreak => unreachable,
@@ -31745,7 +31763,7 @@ fn coerceTupleToStruct(
         .anon_struct_type => |anon_struct_type| if (anon_struct_type.names.len > 0)
             anon_struct_type.names.get(ip)[tuple_field_index]
         else
-            try ip.getOrPutStringFmt(sema.gpa, "{d}", .{tuple_field_index}, .no_embedded_nulls),
+            try ip.getOrPutStringFmt(sema.gpa, pt.tid, "{d}", .{tuple_field_index}, .no_embedded_nulls),
         .struct_type => ip.loadStructType(inst_ty.toIntern()).field_names.get(ip)[tuple_field_index],
         else => unreachable,
     };
@@ -31858,13 +31876,13 @@ fn coerceTupleToTuple(
         .anon_struct_type => |anon_struct_type| if (anon_struct_type.names.len > 0)
             anon_struct_type.names.get(ip)[field_i]
         else
-            try ip.getOrPutStringFmt(sema.gpa, "{d}", .{field_i}, .no_embedded_nulls),
+            try ip.getOrPutStringFmt(sema.gpa, pt.tid, "{d}", .{field_i}, .no_embedded_nulls),
         .struct_type => s: {
             const struct_type = ip.loadStructType(inst_ty.toIntern());
             if (struct_type.field_names.len > 0) {
                 break :s struct_type.field_names.get(ip)[field_i];
             } else {
-                break :s try ip.getOrPutStringFmt(sema.gpa, "{d}", .{field_i}, .no_embedded_nulls);
+                break :s try ip.getOrPutStringFmt(sema.gpa, pt.tid, "{d}", .{field_i}, .no_embedded_nulls);
             }
         },
         else => unreachable,
@@ -34849,7 +34867,7 @@ fn resolvePeerTypesInner(
             const result_buf = try sema.arena.create(PeerResolveResult);
             result_buf.* = result;
             const field_name = if (is_tuple)
-                try ip.getOrPutStringFmt(sema.gpa, "{d}", .{field_index}, .no_embedded_nulls)
+                try ip.getOrPutStringFmt(sema.gpa, pt.tid, "{d}", .{field_index}, .no_embedded_nulls)
             else
                 field_names[field_index];
@@ -36066,7 +36084,7 @@ fn semaStructFields(
 
         // This string needs to outlive the ZIR code.
         if (opt_field_name_zir) |field_name_zir| {
-            const field_name = try ip.getOrPutString(gpa, field_name_zir, .no_embedded_nulls);
+            const field_name = try ip.getOrPutString(gpa, pt.tid, field_name_zir, .no_embedded_nulls);
             assert(struct_type.addFieldName(ip, field_name) == null);
         }
@@ -36567,7 +36585,7 @@ fn semaUnionFields(pt: Zcu.PerThread, arena: Allocator, union_type: InternPool.L
         }
 
         // This string needs to outlive the ZIR code.
-        const field_name = try ip.getOrPutString(gpa, field_name_zir, .no_embedded_nulls);
+        const field_name = try ip.getOrPutString(gpa, pt.tid, field_name_zir, .no_embedded_nulls);
         if (enum_field_names.len != 0) {
             enum_field_names[field_i] = field_name;
         }
@@ -36716,9 +36734,10 @@ fn generateUnionTagTypeNumbered(
     const new_decl_index = try mod.allocateNewDecl(block.namespace);
     errdefer mod.destroyDecl(new_decl_index);
 
-    const fqn = try union_owner_decl.fullyQualifiedName(mod);
+    const fqn = try union_owner_decl.fullyQualifiedName(pt);
     const name = try ip.getOrPutStringFmt(
         gpa,
+        pt.tid,
         "@typeInfo({}).Union.tag_type.?",
         .{fqn.fmt(ip)},
         .no_embedded_nulls,
@@ -36764,11 +36783,12 @@ fn generateUnionTagTypeSimple(
     const gpa = sema.gpa;
 
     const new_decl_index = new_decl_index: {
-        const fqn = try union_owner_decl.fullyQualifiedName(mod);
+        const fqn = try union_owner_decl.fullyQualifiedName(pt);
         const new_decl_index = try mod.allocateNewDecl(block.namespace);
         errdefer mod.destroyDecl(new_decl_index);
         const name = try ip.getOrPutStringFmt(
             gpa,
+            pt.tid,
             "@typeInfo({}).Union.tag_type.?",
             .{fqn.fmt(ip)},
             .no_embedded_nulls,
diff --git a/src/Value.zig b/src/Value.zig
index 21bb207b59..e47598fe0a 100644
--- a/src/Value.zig
+++ b/src/Value.zig
@@ -67,7 +67,7 @@ pub fn toIpString(val: Value, ty: Type, pt: Zcu.PerThread) !InternPool.NullTermi
             const byte: u8 = @intCast(Value.fromInterned(elem).toUnsignedInt(pt));
             const len: usize = @intCast(ty.arrayLen(mod));
             try ip.string_bytes.appendNTimes(mod.gpa, byte, len);
-            return ip.getOrPutTrailingString(mod.gpa, len, .no_embedded_nulls);
+            return ip.getOrPutTrailingString(mod.gpa, pt.tid, len, .no_embedded_nulls);
         },
     }
 }
@@ -118,7 +118,7 @@ fn arrayToIpString(val: Value, len_u64: u64, pt: Zcu.PerThread) !InternPool.Null
         const byte: u8 = @intCast(elem_val.toUnsignedInt(pt));
         ip.string_bytes.appendAssumeCapacity(byte);
     }
-    return ip.getOrPutTrailingString(gpa, len, .no_embedded_nulls);
+    return ip.getOrPutTrailingString(gpa, pt.tid, len, .no_embedded_nulls);
 }
 
 pub fn fromInterned(i: InternPool.Index) Value {
diff --git a/src/Zcu.zig b/src/Zcu.zig
index bfc70815df..c4ebc6a36b 100644
--- a/src/Zcu.zig
+++ b/src/Zcu.zig
@@ -420,11 +420,11 @@ pub const Decl = struct {
         return zcu.namespacePtr(decl.src_namespace).renderFullyQualifiedDebugName(zcu, decl.name, writer);
     }
 
-    pub fn fullyQualifiedName(decl: Decl, zcu: *Zcu) !InternPool.NullTerminatedString {
+    pub fn fullyQualifiedName(decl: Decl, pt: Zcu.PerThread) !InternPool.NullTerminatedString {
         return if (decl.name_fully_qualified)
             decl.name
         else
-            zcu.namespacePtr(decl.src_namespace).fullyQualifiedName(zcu, decl.name);
+            pt.zcu.namespacePtr(decl.src_namespace).fullyQualifiedName(pt, decl.name);
     }
 
     pub fn typeOf(decl: Decl, zcu: *const Zcu) Type {
@@ -688,9 +688,10 @@ pub const Namespace = struct {
     pub fn fullyQualifiedName(
         ns: Namespace,
-        zcu: *Zcu,
+        pt: Zcu.PerThread,
         name: InternPool.NullTerminatedString,
     ) !InternPool.NullTerminatedString {
+        const zcu = pt.zcu;
         const ip = &zcu.intern_pool;
         const count = count: {
             var count: usize = name.length(ip) + 1;
@@ -723,7 +724,7 @@ pub const Namespace = struct {
             };
         }
 
-        return ip.getOrPutTrailingString(gpa, ip.string_bytes.items.len - start, .no_embedded_nulls);
+        return ip.getOrPutTrailingString(gpa, pt.tid, ip.string_bytes.items.len - start, .no_embedded_nulls);
     }
 
     pub fn getType(ns: Namespace, zcu: *Zcu) Type {
@@ -875,11 +876,12 @@ pub const File = struct {
         };
     }
 
-    pub fn fullyQualifiedName(file: File, mod: *Module) !InternPool.NullTerminatedString {
-        const ip = &mod.intern_pool;
+    pub fn fullyQualifiedName(file: File, pt: Zcu.PerThread) !InternPool.NullTerminatedString {
+        const gpa = pt.zcu.gpa;
+        const ip = &pt.zcu.intern_pool;
         const start = ip.string_bytes.items.len;
-        try file.renderFullyQualifiedName(ip.string_bytes.writer(mod.gpa));
-        return ip.getOrPutTrailingString(mod.gpa, ip.string_bytes.items.len - start, .no_embedded_nulls);
+        try file.renderFullyQualifiedName(ip.string_bytes.writer(gpa));
+        return ip.getOrPutTrailingString(gpa, pt.tid, ip.string_bytes.items.len - start, .no_embedded_nulls);
     }
 
     pub fn fullPath(file: File, ally: Allocator) ![]u8 {
@@ -2569,8 +2571,8 @@ pub fn declIsRoot(mod: *Module, decl_index: Decl.Index) bool {
 }
 
 // TODO https://github.com/ziglang/zig/issues/8643
-const data_has_safety_tag = @sizeOf(Zir.Inst.Data) != 8;
-const HackDataLayout = extern struct {
+pub const data_has_safety_tag = @sizeOf(Zir.Inst.Data) != 8;
+pub const HackDataLayout = extern struct {
     data: [8]u8 align(@alignOf(Zir.Inst.Data)),
     safety_tag: u8,
 };
@@ -2580,291 +2582,11 @@ comptime {
     }
 }
 
-pub fn astGenFile(
-    zcu: *Zcu,
-    file: *File,
-    /// This parameter is provided separately from `file` because it is not
-    /// safe to access `import_table` without a lock, and this index is needed
-    /// in the call to `updateZirRefs`.
-    file_index: File.Index,
-    path_digest: Cache.BinDigest,
-    opt_root_decl: Zcu.Decl.OptionalIndex,
-) !void {
-    assert(!file.mod.isBuiltin());
-
-    const tracy = trace(@src());
-    defer tracy.end();
-
-    const comp = zcu.comp;
-    const gpa = zcu.gpa;
-
-    // In any case we need to examine the stat of the file to determine the course of action.
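The Value.zig and Namespace/File hunks above all follow the same append-then-intern pattern: bytes are pushed onto the intern pool's `string_bytes` buffer first, and the trailing run is then deduplicated in one call that now also carries `pt.tid`. A sketch of that pattern, written against the signatures visible in these hunks (`internBytes` is a hypothetical helper):

```zig
// Hypothetical helper condensing the pattern from toIpString/arrayToIpString:
// write raw bytes into the pool's byte buffer, then intern the trailing `len`
// bytes as one NullTerminatedString, attributed to this thread via pt.tid.
fn internBytes(pt: Zcu.PerThread, bytes: []const u8) !InternPool.NullTerminatedString {
    const zcu = pt.zcu;
    const ip = &zcu.intern_pool;
    try ip.string_bytes.appendSlice(zcu.gpa, bytes);
    return ip.getOrPutTrailingString(zcu.gpa, pt.tid, bytes.len, .no_embedded_nulls);
}
```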
-    var source_file = try file.mod.root.openFile(file.sub_file_path, .{});
-    defer source_file.close();
-
-    const stat = try source_file.stat();
-
-    const want_local_cache = file.mod == zcu.main_mod;
-    const hex_digest = Cache.binToHex(path_digest);
-    const cache_directory = if (want_local_cache) zcu.local_zir_cache else zcu.global_zir_cache;
-    const zir_dir = cache_directory.handle;
-
-    // Determine whether we need to reload the file from disk and redo parsing and AstGen.
-    var lock: std.fs.File.Lock = switch (file.status) {
-        .never_loaded, .retryable_failure => lock: {
-            // First, load the cached ZIR code, if any.
-            log.debug("AstGen checking cache: {s} (local={}, digest={s})", .{
-                file.sub_file_path, want_local_cache, &hex_digest,
-            });
-
-            break :lock .shared;
-        },
-        .parse_failure, .astgen_failure, .success_zir => lock: {
-            const unchanged_metadata =
-                stat.size == file.stat.size and
-                stat.mtime == file.stat.mtime and
-                stat.inode == file.stat.inode;
-
-            if (unchanged_metadata) {
-                log.debug("unmodified metadata of file: {s}", .{file.sub_file_path});
-                return;
-            }
-
-            log.debug("metadata changed: {s}", .{file.sub_file_path});
-
-            break :lock .exclusive;
-        },
-    };
-
-    // We ask for a lock in order to coordinate with other zig processes.
-    // If another process is already working on this file, we will get the cached
-    // version. Likewise if we're working on AstGen and another process asks for
-    // the cached file, they'll get it.
-    const cache_file = while (true) {
-        break zir_dir.createFile(&hex_digest, .{
-            .read = true,
-            .truncate = false,
-            .lock = lock,
-        }) catch |err| switch (err) {
-            error.NotDir => unreachable, // no dir components
-            error.InvalidUtf8 => unreachable, // it's a hex encoded name
-            error.InvalidWtf8 => unreachable, // it's a hex encoded name
-            error.BadPathName => unreachable, // it's a hex encoded name
-            error.NameTooLong => unreachable, // it's a fixed size name
-            error.PipeBusy => unreachable, // it's not a pipe
-            error.WouldBlock => unreachable, // not asking for non-blocking I/O
-            // There are no dir components, so you would think that this was
-            // unreachable, however we have observed on macOS two processes racing
-            // to do openat() with O_CREAT manifest in ENOENT.
-            error.FileNotFound => continue,
-
-            else => |e| return e, // Retryable errors are handled at callsite.
-        };
-    };
-    defer cache_file.close();
-
-    while (true) {
-        update: {
-            // First we read the header to determine the lengths of arrays.
-            const header = cache_file.reader().readStruct(Zir.Header) catch |err| switch (err) {
-                // This can happen if Zig bails out of this function between creating
-                // the cached file and writing it.
-                error.EndOfStream => break :update,
-                else => |e| return e,
-            };
-            const unchanged_metadata =
-                stat.size == header.stat_size and
-                stat.mtime == header.stat_mtime and
-                stat.inode == header.stat_inode;
-
-            if (!unchanged_metadata) {
-                log.debug("AstGen cache stale: {s}", .{file.sub_file_path});
-                break :update;
-            }
-            log.debug("AstGen cache hit: {s} instructions_len={d}", .{
-                file.sub_file_path, header.instructions_len,
-            });
-
-            file.zir = loadZirCacheBody(gpa, header, cache_file) catch |err| switch (err) {
-                error.UnexpectedFileSize => {
-                    log.warn("unexpected EOF reading cached ZIR for {s}", .{file.sub_file_path});
-                    break :update;
-                },
-                else => |e| return e,
-            };
-            file.zir_loaded = true;
-            file.stat = .{
-                .size = header.stat_size,
-                .inode = header.stat_inode,
-                .mtime = header.stat_mtime,
-            };
-            file.status = .success_zir;
-            log.debug("AstGen cached success: {s}", .{file.sub_file_path});
-
-            // TODO don't report compile errors until Sema @importFile
-            if (file.zir.hasCompileErrors()) {
-                {
-                    comp.mutex.lock();
-                    defer comp.mutex.unlock();
-                    try zcu.failed_files.putNoClobber(gpa, file, null);
-                }
-                file.status = .astgen_failure;
-                return error.AnalysisFail;
-            }
-            return;
-        }
-
-        // If we already have the exclusive lock then it is our job to update.
-        if (builtin.os.tag == .wasi or lock == .exclusive) break;
-        // Otherwise, unlock to give someone a chance to get the exclusive lock
-        // and then upgrade to an exclusive lock.
-        cache_file.unlock();
-        lock = .exclusive;
-        try cache_file.lock(lock);
-    }
-
-    // The cache is definitely stale so delete the contents to avoid an underwrite later.
-    cache_file.setEndPos(0) catch |err| switch (err) {
-        error.FileTooBig => unreachable, // 0 is not too big
-
-        else => |e| return e,
-    };
-
-    zcu.lockAndClearFileCompileError(file);
-
-    // If the previous ZIR does not have compile errors, keep it around
-    // in case parsing or new ZIR fails. In case of successful ZIR update
-    // at the end of this function we will free it.
-    // We keep the previous ZIR loaded so that we can use it
-    // for the update next time it does not have any compile errors. This avoids
-    // needlessly tossing out semantic analysis work when an error is
-    // temporarily introduced.
-    if (file.zir_loaded and !file.zir.hasCompileErrors()) {
-        assert(file.prev_zir == null);
-        const prev_zir_ptr = try gpa.create(Zir);
-        file.prev_zir = prev_zir_ptr;
-        prev_zir_ptr.* = file.zir;
-        file.zir = undefined;
-        file.zir_loaded = false;
-    }
-    file.unload(gpa);
-
-    if (stat.size > std.math.maxInt(u32))
-        return error.FileTooBig;
-
-    const source = try gpa.allocSentinel(u8, @as(usize, @intCast(stat.size)), 0);
-    defer if (!file.source_loaded) gpa.free(source);
-    const amt = try source_file.readAll(source);
-    if (amt != stat.size)
-        return error.UnexpectedEndOfFile;
-
-    file.stat = .{
-        .size = stat.size,
-        .inode = stat.inode,
-        .mtime = stat.mtime,
-    };
-    file.source = source;
-    file.source_loaded = true;
-
-    file.tree = try Ast.parse(gpa, source, .zig);
-    file.tree_loaded = true;
-
-    // Any potential AST errors are converted to ZIR errors here.
-    file.zir = try AstGen.generate(gpa, file.tree);
-    file.zir_loaded = true;
-    file.status = .success_zir;
-    log.debug("AstGen fresh success: {s}", .{file.sub_file_path});
-
-    const safety_buffer = if (data_has_safety_tag)
-        try gpa.alloc([8]u8, file.zir.instructions.len)
-    else
-        undefined;
-    defer if (data_has_safety_tag) gpa.free(safety_buffer);
-    const data_ptr = if (data_has_safety_tag)
-        if (file.zir.instructions.len == 0)
-            @as([*]const u8, undefined)
-        else
-            @as([*]const u8, @ptrCast(safety_buffer.ptr))
-    else
-        @as([*]const u8, @ptrCast(file.zir.instructions.items(.data).ptr));
-    if (data_has_safety_tag) {
-        // The `Data` union has a safety tag but in the file format we store it without.
-        for (file.zir.instructions.items(.data), 0..) |*data, i| {
-            const as_struct = @as(*const HackDataLayout, @ptrCast(data));
-            safety_buffer[i] = as_struct.data;
-        }
-    }
-
-    const header: Zir.Header = .{
-        .instructions_len = @as(u32, @intCast(file.zir.instructions.len)),
-        .string_bytes_len = @as(u32, @intCast(file.zir.string_bytes.len)),
-        .extra_len = @as(u32, @intCast(file.zir.extra.len)),
-
-        .stat_size = stat.size,
-        .stat_inode = stat.inode,
-        .stat_mtime = stat.mtime,
-    };
-    var iovecs = [_]std.posix.iovec_const{
-        .{
-            .base = @as([*]const u8, @ptrCast(&header)),
-            .len = @sizeOf(Zir.Header),
-        },
-        .{
-            .base = @as([*]const u8, @ptrCast(file.zir.instructions.items(.tag).ptr)),
-            .len = file.zir.instructions.len,
-        },
-        .{
-            .base = data_ptr,
-            .len = file.zir.instructions.len * 8,
-        },
-        .{
-            .base = file.zir.string_bytes.ptr,
-            .len = file.zir.string_bytes.len,
-        },
-        .{
-            .base = @as([*]const u8, @ptrCast(file.zir.extra.ptr)),
-            .len = file.zir.extra.len * 4,
-        },
-    };
-    cache_file.writevAll(&iovecs) catch |err| {
-        log.warn("unable to write cached ZIR code for {}{s} to {}{s}: {s}", .{
-            file.mod.root, file.sub_file_path, cache_directory, &hex_digest, @errorName(err),
-        });
-    };
-
-    if (file.zir.hasCompileErrors()) {
-        {
-            comp.mutex.lock();
-            defer comp.mutex.unlock();
-            try zcu.failed_files.putNoClobber(gpa, file, null);
-        }
-        file.status = .astgen_failure;
-        return error.AnalysisFail;
-    }
-
-    if (file.prev_zir) |prev_zir| {
-        try updateZirRefs(zcu, file, file_index, prev_zir.*);
-        // No need to keep previous ZIR.
-        prev_zir.deinit(gpa);
-        gpa.destroy(prev_zir);
-        file.prev_zir = null;
-    }
-
-    if (opt_root_decl.unwrap()) |root_decl| {
-        // The root of this file must be re-analyzed, since the file has changed.
-        comp.mutex.lock();
-        defer comp.mutex.unlock();
-
-        log.debug("outdated root Decl: {}", .{root_decl});
-        try zcu.outdated_file_root.put(gpa, root_decl, {});
-    }
-}
-
 pub fn loadZirCache(gpa: Allocator, cache_file: std.fs.File) !Zir {
     return loadZirCacheBody(gpa, try cache_file.reader().readStruct(Zir.Header), cache_file);
 }
 
-fn loadZirCacheBody(gpa: Allocator, header: Zir.Header, cache_file: std.fs.File) !Zir {
+pub fn loadZirCacheBody(gpa: Allocator, header: Zir.Header, cache_file: std.fs.File) !Zir {
     var instructions: std.MultiArrayList(Zir.Inst) = .{};
     errdefer instructions.deinit(gpa);
 
@@ -2930,127 +2652,6 @@ fn loadZirCacheBody(gpa: Allocator, header: Zir.Header, cache_file: std.fs.File)
     return zir;
 }
 
-/// This is called from the AstGen thread pool, so must acquire
-/// the Compilation mutex when acting on shared state.
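`loadZirCacheBody` becomes `pub` here because `astGenFile` moves out of this file and must now call it as `Zcu.loadZirCacheBody` from `Zcu/PerThread.zig`. A condensed sketch of the header-then-body cache protocol it participates in, using only the calls visible in these hunks (`readCachedZir` is a hypothetical helper; the real code handles more error cases):

```zig
// Sketch: a fixed-size Zir.Header carries array lengths plus the source file's
// stat info; the ZIR body is only deserialized when the stat still matches.
fn readCachedZir(gpa: std.mem.Allocator, cache_file: std.fs.File, stat: std.fs.File.Stat) !?Zir {
    const header = try cache_file.reader().readStruct(Zir.Header);
    const fresh = stat.size == header.stat_size and
        stat.mtime == header.stat_mtime and
        stat.inode == header.stat_inode;
    if (!fresh) return null; // stale: caller re-runs parsing and AstGen
    return try Zcu.loadZirCacheBody(gpa, header, cache_file);
}
```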
-fn updateZirRefs(zcu: *Module, file: *File, file_index: File.Index, old_zir: Zir) !void {
-    const gpa = zcu.gpa;
-    const new_zir = file.zir;
-
-    var inst_map: std.AutoHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index) = .{};
-    defer inst_map.deinit(gpa);
-
-    try mapOldZirToNew(gpa, old_zir, new_zir, &inst_map);
-
-    const old_tag = old_zir.instructions.items(.tag);
-    const old_data = old_zir.instructions.items(.data);
-
-    // TODO: this should be done after all AstGen workers complete, to avoid
-    // iterating over this full set for every updated file.
-    for (zcu.intern_pool.tracked_insts.keys(), 0..) |*ti, idx_raw| {
-        const ti_idx: InternPool.TrackedInst.Index = @enumFromInt(idx_raw);
-        if (ti.file != file_index) continue;
-        const old_inst = ti.inst;
-        ti.inst = inst_map.get(ti.inst) orelse {
-            // Tracking failed for this instruction. Invalidate associated `src_hash` deps.
-            zcu.comp.mutex.lock();
-            defer zcu.comp.mutex.unlock();
-            log.debug("tracking failed for %{d}", .{old_inst});
-            try zcu.markDependeeOutdated(.{ .src_hash = ti_idx });
-            continue;
-        };
-
-        if (old_zir.getAssociatedSrcHash(old_inst)) |old_hash| hash_changed: {
-            if (new_zir.getAssociatedSrcHash(ti.inst)) |new_hash| {
-                if (std.zig.srcHashEql(old_hash, new_hash)) {
-                    break :hash_changed;
-                }
-                log.debug("hash for (%{d} -> %{d}) changed: {} -> {}", .{
-                    old_inst,
-                    ti.inst,
-                    std.fmt.fmtSliceHexLower(&old_hash),
-                    std.fmt.fmtSliceHexLower(&new_hash),
-                });
-            }
-            // The source hash associated with this instruction changed - invalidate relevant dependencies.
-            zcu.comp.mutex.lock();
-            defer zcu.comp.mutex.unlock();
-            try zcu.markDependeeOutdated(.{ .src_hash = ti_idx });
-        }
-
-        // If this is a `struct_decl` etc, we must invalidate any outdated namespace dependencies.
-        const has_namespace = switch (old_tag[@intFromEnum(old_inst)]) {
-            .extended => switch (old_data[@intFromEnum(old_inst)].extended.opcode) {
-                .struct_decl, .union_decl, .opaque_decl, .enum_decl => true,
-                else => false,
-            },
-            else => false,
-        };
-        if (!has_namespace) continue;
-
-        var old_names: std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void) = .{};
-        defer old_names.deinit(zcu.gpa);
-        {
-            var it = old_zir.declIterator(old_inst);
-            while (it.next()) |decl_inst| {
-                const decl_name = old_zir.getDeclaration(decl_inst)[0].name;
-                switch (decl_name) {
-                    .@"comptime", .@"usingnamespace", .unnamed_test, .decltest => continue,
-                    _ => if (decl_name.isNamedTest(old_zir)) continue,
-                }
-                const name_zir = decl_name.toString(old_zir).?;
-                const name_ip = try zcu.intern_pool.getOrPutString(
-                    zcu.gpa,
-                    old_zir.nullTerminatedString(name_zir),
-                    .no_embedded_nulls,
-                );
-                try old_names.put(zcu.gpa, name_ip, {});
-            }
-        }
-        var any_change = false;
-        {
-            var it = new_zir.declIterator(ti.inst);
-            while (it.next()) |decl_inst| {
-                const decl_name = old_zir.getDeclaration(decl_inst)[0].name;
-                switch (decl_name) {
-                    .@"comptime", .@"usingnamespace", .unnamed_test, .decltest => continue,
-                    _ => if (decl_name.isNamedTest(old_zir)) continue,
-                }
-                const name_zir = decl_name.toString(old_zir).?;
-                const name_ip = try zcu.intern_pool.getOrPutString(
-                    zcu.gpa,
-                    old_zir.nullTerminatedString(name_zir),
-                    .no_embedded_nulls,
-                );
-                if (!old_names.swapRemove(name_ip)) continue;
-                // Name added
-                any_change = true;
-                zcu.comp.mutex.lock();
-                defer zcu.comp.mutex.unlock();
-                try zcu.markDependeeOutdated(.{ .namespace_name = .{
-                    .namespace = ti_idx,
-                    .name = name_ip,
-                } });
-            }
-        }
-        // The only elements remaining in `old_names` now are any names which were removed.
-        for (old_names.keys()) |name_ip| {
-            any_change = true;
-            zcu.comp.mutex.lock();
-            defer zcu.comp.mutex.unlock();
-            try zcu.markDependeeOutdated(.{ .namespace_name = .{
-                .namespace = ti_idx,
-                .name = name_ip,
-            } });
-        }
-
-        if (any_change) {
-            zcu.comp.mutex.lock();
-            defer zcu.comp.mutex.unlock();
-            try zcu.markDependeeOutdated(.{ .namespace = ti_idx });
-        }
-    }
-}
-
 pub fn markDependeeOutdated(zcu: *Zcu, dependee: InternPool.Dependee) !void {
     log.debug("outdated dependee: {}", .{dependee});
     var it = zcu.intern_pool.dependencyIterator(dependee);
@@ -3695,268 +3296,6 @@ fn computePathDigest(zcu: *Zcu, mod: *Package.Module, sub_file_path: []const u8)
     return bin;
 }
 
-pub fn scanNamespace(
-    zcu: *Zcu,
-    namespace_index: Namespace.Index,
-    decls: []const Zir.Inst.Index,
-    parent_decl: *Decl,
-) Allocator.Error!void {
-    const tracy = trace(@src());
-    defer tracy.end();
-
-    const gpa = zcu.gpa;
-    const namespace = zcu.namespacePtr(namespace_index);
-
-    // For incremental updates, `scanDecl` wants to look up existing decls by their ZIR index rather
-    // than their name. We'll build an efficient mapping now, then discard the current `decls`.
-    var existing_by_inst: std.AutoHashMapUnmanaged(InternPool.TrackedInst.Index, Decl.Index) = .{};
-    defer existing_by_inst.deinit(gpa);
-
-    try existing_by_inst.ensureTotalCapacity(gpa, @intCast(namespace.decls.count()));
-
-    for (namespace.decls.keys()) |decl_index| {
-        const decl = zcu.declPtr(decl_index);
-        existing_by_inst.putAssumeCapacityNoClobber(decl.zir_decl_index.unwrap().?, decl_index);
-    }
-
-    var seen_decls: std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, void) = .{};
-    defer seen_decls.deinit(gpa);
-
-    try zcu.comp.work_queue.ensureUnusedCapacity(decls.len);
-
-    namespace.decls.clearRetainingCapacity();
-    try namespace.decls.ensureTotalCapacity(gpa, decls.len);
-
-    namespace.usingnamespace_set.clearRetainingCapacity();
-
-    var scan_decl_iter: ScanDeclIter = .{
-        .zcu = zcu,
-        .namespace_index = namespace_index,
-        .parent_decl = parent_decl,
-        .seen_decls = &seen_decls,
-        .existing_by_inst = &existing_by_inst,
-        .pass = .named,
-    };
-    for (decls) |decl_inst| {
-        try scanDecl(&scan_decl_iter, decl_inst);
-    }
-    scan_decl_iter.pass = .unnamed;
-    for (decls) |decl_inst| {
-        try scanDecl(&scan_decl_iter, decl_inst);
-    }
-
-    if (seen_decls.count() != namespace.decls.count()) {
-        // Do a pass over the namespace contents and remove any decls from the last update
-        // which were removed in this one.
-        var i: usize = 0;
-        while (i < namespace.decls.count()) {
-            const decl_index = namespace.decls.keys()[i];
-            const decl = zcu.declPtr(decl_index);
-            if (!seen_decls.contains(decl.name)) {
-                // We must preserve namespace ordering for @typeInfo.
-                namespace.decls.orderedRemoveAt(i);
-                i -= 1;
-            }
-        }
-    }
-}
-
-const ScanDeclIter = struct {
-    zcu: *Zcu,
-    namespace_index: Namespace.Index,
-    parent_decl: *Decl,
-    seen_decls: *std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, void),
-    existing_by_inst: *const std.AutoHashMapUnmanaged(InternPool.TrackedInst.Index, Decl.Index),
-    /// Decl scanning is run in two passes, so that we can detect when a generated
-    /// name would clash with an explicit name and use a different one.
-    pass: enum { named, unnamed },
-    usingnamespace_index: usize = 0,
-    comptime_index: usize = 0,
-    unnamed_test_index: usize = 0,
-
-    fn avoidNameConflict(iter: *ScanDeclIter, comptime fmt: []const u8, args: anytype) !InternPool.NullTerminatedString {
-        const zcu = iter.zcu;
-        const gpa = zcu.gpa;
-        const ip = &zcu.intern_pool;
-        var name = try ip.getOrPutStringFmt(gpa, fmt, args, .no_embedded_nulls);
-        var gop = try iter.seen_decls.getOrPut(gpa, name);
-        var next_suffix: u32 = 0;
-        while (gop.found_existing) {
-            name = try ip.getOrPutStringFmt(gpa, "{}_{d}", .{ name.fmt(ip), next_suffix }, .no_embedded_nulls);
-            gop = try iter.seen_decls.getOrPut(gpa, name);
-            next_suffix += 1;
-        }
-        return name;
-    }
-};
-
-fn scanDecl(iter: *ScanDeclIter, decl_inst: Zir.Inst.Index) Allocator.Error!void {
-    const tracy = trace(@src());
-    defer tracy.end();
-
-    const zcu = iter.zcu;
-    const namespace_index = iter.namespace_index;
-    const namespace = zcu.namespacePtr(namespace_index);
-    const gpa = zcu.gpa;
-    const zir = namespace.fileScope(zcu).zir;
-    const ip = &zcu.intern_pool;
-
-    const inst_data = zir.instructions.items(.data)[@intFromEnum(decl_inst)].declaration;
-    const extra = zir.extraData(Zir.Inst.Declaration, inst_data.payload_index);
-    const declaration = extra.data;
-
-    // Every Decl needs a name.
-    const decl_name: InternPool.NullTerminatedString, const kind: Decl.Kind, const is_named_test: bool = switch (declaration.name) {
-        .@"comptime" => info: {
-            if (iter.pass != .unnamed) return;
-            const i = iter.comptime_index;
-            iter.comptime_index += 1;
-            break :info .{
-                try iter.avoidNameConflict("comptime_{d}", .{i}),
-                .@"comptime",
-                false,
-            };
-        },
-        .@"usingnamespace" => info: {
-            // TODO: this isn't right! These should be considered unnamed. Name conflicts can happen here.
-            // The problem is, we need to preserve the decl ordering for `@typeInfo`.
-            // I'm not bothering to fix this now, since some upcoming changes will change this code significantly anyway.
-            if (iter.pass != .named) return;
-            const i = iter.usingnamespace_index;
-            iter.usingnamespace_index += 1;
-            break :info .{
-                try iter.avoidNameConflict("usingnamespace_{d}", .{i}),
-                .@"usingnamespace",
-                false,
-            };
-        },
-        .unnamed_test => info: {
-            if (iter.pass != .unnamed) return;
-            const i = iter.unnamed_test_index;
-            iter.unnamed_test_index += 1;
-            break :info .{
-                try iter.avoidNameConflict("test_{d}", .{i}),
-                .@"test",
-                false,
-            };
-        },
-        .decltest => info: {
-            // We consider these to be unnamed since the decl name can be adjusted to avoid conflicts if necessary.
-            if (iter.pass != .unnamed) return;
-            assert(declaration.flags.has_doc_comment);
-            const name = zir.nullTerminatedString(@enumFromInt(zir.extra[extra.end]));
-            break :info .{
-                try iter.avoidNameConflict("decltest.{s}", .{name}),
-                .@"test",
-                true,
-            };
-        },
-        _ => if (declaration.name.isNamedTest(zir)) info: {
-            // We consider these to be unnamed since the decl name can be adjusted to avoid conflicts if necessary.
-            if (iter.pass != .unnamed) return;
-            break :info .{
-                try iter.avoidNameConflict("test.{s}", .{zir.nullTerminatedString(declaration.name.toString(zir).?)}),
-                .@"test",
-                true,
-            };
-        } else info: {
-            if (iter.pass != .named) return;
-            const name = try ip.getOrPutString(
-                gpa,
-                zir.nullTerminatedString(declaration.name.toString(zir).?),
-                .no_embedded_nulls,
-            );
-            try iter.seen_decls.putNoClobber(gpa, name, {});
-            break :info .{
-                name,
-                .named,
-                false,
-            };
-        },
-    };
-
-    switch (kind) {
-        .@"usingnamespace" => try namespace.usingnamespace_set.ensureUnusedCapacity(gpa, 1),
-        .@"test" => try zcu.test_functions.ensureUnusedCapacity(gpa, 1),
-        else => {},
-    }
-
-    const parent_file_scope_index = iter.parent_decl.getFileScopeIndex(zcu);
-    const tracked_inst = try ip.trackZir(gpa, parent_file_scope_index, decl_inst);
-
-    // We create a Decl for it regardless of analysis status.
-
-    const prev_exported, const decl_index = if (iter.existing_by_inst.get(tracked_inst)) |decl_index| decl_index: {
-        // We need only update this existing Decl.
-        const decl = zcu.declPtr(decl_index);
-        const was_exported = decl.is_exported;
-        assert(decl.kind == kind); // ZIR tracking should preserve this
-        decl.name = decl_name;
-        decl.is_pub = declaration.flags.is_pub;
-        decl.is_exported = declaration.flags.is_export;
-        break :decl_index .{ was_exported, decl_index };
-    } else decl_index: {
-        // Create and set up a new Decl.
-        const new_decl_index = try zcu.allocateNewDecl(namespace_index);
-        const new_decl = zcu.declPtr(new_decl_index);
-        new_decl.kind = kind;
-        new_decl.name = decl_name;
-        new_decl.is_pub = declaration.flags.is_pub;
-        new_decl.is_exported = declaration.flags.is_export;
-        new_decl.zir_decl_index = tracked_inst.toOptional();
-        break :decl_index .{ false, new_decl_index };
-    };
-
-    const decl = zcu.declPtr(decl_index);
-
-    namespace.decls.putAssumeCapacityNoClobberContext(decl_index, {}, .{ .zcu = zcu });
-
-    const comp = zcu.comp;
-    const decl_mod = namespace.fileScope(zcu).mod;
-    const want_analysis = declaration.flags.is_export or switch (kind) {
-        .anon => unreachable,
-        .@"comptime" => true,
-        .@"usingnamespace" => a: {
-            namespace.usingnamespace_set.putAssumeCapacityNoClobber(decl_index, declaration.flags.is_pub);
-            break :a true;
-        },
-        .named => false,
-        .@"test" => a: {
-            if (!comp.config.is_test) break :a false;
-            if (decl_mod != zcu.main_mod) break :a false;
-            if (is_named_test and comp.test_filters.len > 0) {
-                const decl_fqn = try namespace.fullyQualifiedName(zcu, decl_name);
-                const decl_fqn_slice = decl_fqn.toSlice(ip);
-                for (comp.test_filters) |test_filter| {
-                    if (mem.indexOf(u8, decl_fqn_slice, test_filter)) |_| break;
-                } else break :a false;
-            }
-            zcu.test_functions.putAssumeCapacity(decl_index, {}); // may clobber on incremental update
-            break :a true;
-        },
-    };
-
-    if (want_analysis) {
-        // We will not queue analysis if the decl has been analyzed on a previous update and
-        // `is_export` is unchanged. In this case, the incremental update mechanism will handle
-        // re-analysis for us if necessary.
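The removed `scanDecl` above encodes a two-pass naming scheme: the `.named` pass claims every explicit name first, so that the `.unnamed` pass can generate collision-free names for comptime blocks, unnamed tests, and decltests. A condensed sketch of the suffix-bumping idea from `avoidNameConflict`, using the pre-`tid` signatures of the removed code (`NameSet` stands in for the `seen_decls` hash map):

```zig
// Sketch of avoidNameConflict: keep appending "_<n>" until the candidate name
// is absent from the set of names already claimed in this namespace.
fn uniqueName(
    ip: *InternPool,
    gpa: std.mem.Allocator,
    seen: *NameSet, // stand-in for std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, void)
    base: InternPool.NullTerminatedString,
) !InternPool.NullTerminatedString {
    var name = base;
    var suffix: u32 = 0;
    while ((try seen.getOrPut(gpa, name)).found_existing) : (suffix += 1) {
        name = try ip.getOrPutStringFmt(gpa, "{}_{d}", .{ name.fmt(ip), suffix }, .no_embedded_nulls);
    }
    return name;
}
```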
-        if (prev_exported != declaration.flags.is_export or decl.analysis == .unreferenced) {
-            log.debug("scanDecl queue analyze_decl file='{s}' decl_name='{}' decl_index={d}", .{
-                namespace.fileScope(zcu).sub_file_path, decl_name.fmt(ip), decl_index,
-            });
-            comp.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = decl_index });
-        }
-    }
-
-    if (decl.getOwnedFunction(zcu) != null) {
-        // TODO this logic is insufficient; namespaces we don't re-scan may still require
-        // updated line numbers. Look into this!
-        // TODO Look into detecting when this would be unnecessary by storing enough state
-        // in `Decl` to notice that the line number did not change.
-        comp.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl_index });
-    }
-}
-
 /// Cancel the creation of an anon decl and delete any references to it.
 /// If other decls depend on this decl, they must be aborted first.
 pub fn abortAnonDecl(mod: *Module, decl_index: Decl.Index) void {
diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig
index 785a5d52e0..8cf6922345 100644
--- a/src/Zcu/PerThread.zig
+++ b/src/Zcu/PerThread.zig
@@ -5,6 +5,411 @@ tid: Id,
 
 pub const Id = if (builtin.single_threaded) enum { main } else enum(usize) { main, _ };
 
+pub fn astGenFile(
+    pt: Zcu.PerThread,
+    file: *Zcu.File,
+    /// This parameter is provided separately from `file` because it is not
+    /// safe to access `import_table` without a lock, and this index is needed
+    /// in the call to `updateZirRefs`.
+    file_index: Zcu.File.Index,
+    path_digest: Cache.BinDigest,
+    opt_root_decl: Zcu.Decl.OptionalIndex,
+) !void {
+    assert(!file.mod.isBuiltin());
+
+    const tracy = trace(@src());
+    defer tracy.end();
+
+    const zcu = pt.zcu;
+    const comp = zcu.comp;
+    const gpa = zcu.gpa;
+
+    // In any case we need to examine the stat of the file to determine the course of action.
+    var source_file = try file.mod.root.openFile(file.sub_file_path, .{});
+    defer source_file.close();
+
+    const stat = try source_file.stat();
+
+    const want_local_cache = file.mod == zcu.main_mod;
+    const hex_digest = Cache.binToHex(path_digest);
+    const cache_directory = if (want_local_cache) zcu.local_zir_cache else zcu.global_zir_cache;
+    const zir_dir = cache_directory.handle;
+
+    // Determine whether we need to reload the file from disk and redo parsing and AstGen.
+    var lock: std.fs.File.Lock = switch (file.status) {
+        .never_loaded, .retryable_failure => lock: {
+            // First, load the cached ZIR code, if any.
+            log.debug("AstGen checking cache: {s} (local={}, digest={s})", .{
+                file.sub_file_path, want_local_cache, &hex_digest,
+            });
+
+            break :lock .shared;
+        },
+        .parse_failure, .astgen_failure, .success_zir => lock: {
+            const unchanged_metadata =
+                stat.size == file.stat.size and
+                stat.mtime == file.stat.mtime and
+                stat.inode == file.stat.inode;
+
+            if (unchanged_metadata) {
+                log.debug("unmodified metadata of file: {s}", .{file.sub_file_path});
+                return;
+            }
+
+            log.debug("metadata changed: {s}", .{file.sub_file_path});
+
+            break :lock .exclusive;
+        },
+    };
+
+    // We ask for a lock in order to coordinate with other zig processes.
+    // If another process is already working on this file, we will get the cached
+    // version. Likewise if we're working on AstGen and another process asks for
+    // the cached file, they'll get it.
+    const cache_file = while (true) {
+        break zir_dir.createFile(&hex_digest, .{
+            .read = true,
+            .truncate = false,
+            .lock = lock,
+        }) catch |err| switch (err) {
+            error.NotDir => unreachable, // no dir components
+            error.InvalidUtf8 => unreachable, // it's a hex encoded name
+            error.InvalidWtf8 => unreachable, // it's a hex encoded name
+            error.BadPathName => unreachable, // it's a hex encoded name
+            error.NameTooLong => unreachable, // it's a fixed size name
+            error.PipeBusy => unreachable, // it's not a pipe
+            error.WouldBlock => unreachable, // not asking for non-blocking I/O
+            // There are no dir components, so you would think that this was
+            // unreachable, however we have observed on macOS two processes racing
+            // to do openat() with O_CREAT manifest in ENOENT.
+            error.FileNotFound => continue,
+
+            else => |e| return e, // Retryable errors are handled at callsite.
+        };
+    };
+    defer cache_file.close();
+
+    while (true) {
+        update: {
+            // First we read the header to determine the lengths of arrays.
+            const header = cache_file.reader().readStruct(Zir.Header) catch |err| switch (err) {
+                // This can happen if Zig bails out of this function between creating
+                // the cached file and writing it.
+                error.EndOfStream => break :update,
+                else => |e| return e,
+            };
+            const unchanged_metadata =
+                stat.size == header.stat_size and
+                stat.mtime == header.stat_mtime and
+                stat.inode == header.stat_inode;
+
+            if (!unchanged_metadata) {
+                log.debug("AstGen cache stale: {s}", .{file.sub_file_path});
+                break :update;
+            }
+            log.debug("AstGen cache hit: {s} instructions_len={d}", .{
+                file.sub_file_path, header.instructions_len,
+            });
+
+            file.zir = Zcu.loadZirCacheBody(gpa, header, cache_file) catch |err| switch (err) {
+                error.UnexpectedFileSize => {
+                    log.warn("unexpected EOF reading cached ZIR for {s}", .{file.sub_file_path});
+                    break :update;
+                },
+                else => |e| return e,
+            };
+            file.zir_loaded = true;
+            file.stat = .{
+                .size = header.stat_size,
+                .inode = header.stat_inode,
+                .mtime = header.stat_mtime,
+            };
+            file.status = .success_zir;
+            log.debug("AstGen cached success: {s}", .{file.sub_file_path});
+
+            // TODO don't report compile errors until Sema @importFile
+            if (file.zir.hasCompileErrors()) {
+                {
+                    comp.mutex.lock();
+                    defer comp.mutex.unlock();
+                    try zcu.failed_files.putNoClobber(gpa, file, null);
+                }
+                file.status = .astgen_failure;
+                return error.AnalysisFail;
+            }
+            return;
+        }
+
+        // If we already have the exclusive lock then it is our job to update.
+        if (builtin.os.tag == .wasi or lock == .exclusive) break;
+        // Otherwise, unlock to give someone a chance to get the exclusive lock
+        // and then upgrade to an exclusive lock.
+        cache_file.unlock();
+        lock = .exclusive;
+        try cache_file.lock(lock);
+    }
+
+    // The cache is definitely stale so delete the contents to avoid an underwrite later.
+    cache_file.setEndPos(0) catch |err| switch (err) {
+        error.FileTooBig => unreachable, // 0 is not too big
+
+        else => |e| return e,
+    };
+
+    pt.lockAndClearFileCompileError(file);
+
+    // If the previous ZIR does not have compile errors, keep it around
+    // in case parsing or new ZIR fails. In case of successful ZIR update
+    // at the end of this function we will free it.
+    // We keep the previous ZIR loaded so that we can use it
+    // for the update next time it does not have any compile errors. This avoids
+    // needlessly tossing out semantic analysis work when an error is
+    // temporarily introduced.
+    if (file.zir_loaded and !file.zir.hasCompileErrors()) {
+        assert(file.prev_zir == null);
+        const prev_zir_ptr = try gpa.create(Zir);
+        file.prev_zir = prev_zir_ptr;
+        prev_zir_ptr.* = file.zir;
+        file.zir = undefined;
+        file.zir_loaded = false;
+    }
+    file.unload(gpa);
+
+    if (stat.size > std.math.maxInt(u32))
+        return error.FileTooBig;
+
+    const source = try gpa.allocSentinel(u8, @as(usize, @intCast(stat.size)), 0);
+    defer if (!file.source_loaded) gpa.free(source);
+    const amt = try source_file.readAll(source);
+    if (amt != stat.size)
+        return error.UnexpectedEndOfFile;
+
+    file.stat = .{
+        .size = stat.size,
+        .inode = stat.inode,
+        .mtime = stat.mtime,
+    };
+    file.source = source;
+    file.source_loaded = true;
+
+    file.tree = try Ast.parse(gpa, source, .zig);
+    file.tree_loaded = true;
+
+    // Any potential AST errors are converted to ZIR errors here.
+    file.zir = try AstGen.generate(gpa, file.tree);
+    file.zir_loaded = true;
+    file.status = .success_zir;
+    log.debug("AstGen fresh success: {s}", .{file.sub_file_path});
+
+    const safety_buffer = if (Zcu.data_has_safety_tag)
+        try gpa.alloc([8]u8, file.zir.instructions.len)
+    else
+        undefined;
+    defer if (Zcu.data_has_safety_tag) gpa.free(safety_buffer);
+    const data_ptr = if (Zcu.data_has_safety_tag)
+        if (file.zir.instructions.len == 0)
+            @as([*]const u8, undefined)
+        else
+            @as([*]const u8, @ptrCast(safety_buffer.ptr))
+    else
+        @as([*]const u8, @ptrCast(file.zir.instructions.items(.data).ptr));
+    if (Zcu.data_has_safety_tag) {
+        // The `Data` union has a safety tag but in the file format we store it without.
+        for (file.zir.instructions.items(.data), 0..) |*data, i| {
+            const as_struct: *const Zcu.HackDataLayout = @ptrCast(data);
+            safety_buffer[i] = as_struct.data;
+        }
+    }
+
+    const header: Zir.Header = .{
+        .instructions_len = @as(u32, @intCast(file.zir.instructions.len)),
+        .string_bytes_len = @as(u32, @intCast(file.zir.string_bytes.len)),
+        .extra_len = @as(u32, @intCast(file.zir.extra.len)),
+
+        .stat_size = stat.size,
+        .stat_inode = stat.inode,
+        .stat_mtime = stat.mtime,
+    };
+    var iovecs = [_]std.posix.iovec_const{
+        .{
+            .base = @as([*]const u8, @ptrCast(&header)),
+            .len = @sizeOf(Zir.Header),
+        },
+        .{
+            .base = @as([*]const u8, @ptrCast(file.zir.instructions.items(.tag).ptr)),
+            .len = file.zir.instructions.len,
+        },
+        .{
+            .base = data_ptr,
+            .len = file.zir.instructions.len * 8,
+        },
+        .{
+            .base = file.zir.string_bytes.ptr,
+            .len = file.zir.string_bytes.len,
+        },
+        .{
+            .base = @as([*]const u8, @ptrCast(file.zir.extra.ptr)),
+            .len = file.zir.extra.len * 4,
+        },
+    };
+    cache_file.writevAll(&iovecs) catch |err| {
+        log.warn("unable to write cached ZIR code for {}{s} to {}{s}: {s}", .{
+            file.mod.root, file.sub_file_path, cache_directory, &hex_digest, @errorName(err),
+        });
+    };
+
+    if (file.zir.hasCompileErrors()) {
+        {
+            comp.mutex.lock();
+            defer comp.mutex.unlock();
+            try zcu.failed_files.putNoClobber(gpa, file, null);
+        }
+        file.status = .astgen_failure;
+        return error.AnalysisFail;
+    }
+
+    if (file.prev_zir) |prev_zir| {
+        try pt.updateZirRefs(file, file_index, prev_zir.*);
+        // No need to keep previous ZIR.
+        prev_zir.deinit(gpa);
+        gpa.destroy(prev_zir);
+        file.prev_zir = null;
+    }
+
+    if (opt_root_decl.unwrap()) |root_decl| {
+        // The root of this file must be re-analyzed, since the file has changed.
+        comp.mutex.lock();
+        defer comp.mutex.unlock();
+
+        log.debug("outdated root Decl: {}", .{root_decl});
+        try zcu.outdated_file_root.put(gpa, root_decl, {});
+    }
+}
+
+/// This is called from the AstGen thread pool, so must acquire
+/// the Compilation mutex when acting on shared state.
+fn updateZirRefs(pt: Zcu.PerThread, file: *Zcu.File, file_index: Zcu.File.Index, old_zir: Zir) !void {
+    const zcu = pt.zcu;
+    const gpa = zcu.gpa;
+    const new_zir = file.zir;
+
+    var inst_map: std.AutoHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index) = .{};
+    defer inst_map.deinit(gpa);
+
+    try Zcu.mapOldZirToNew(gpa, old_zir, new_zir, &inst_map);
+
+    const old_tag = old_zir.instructions.items(.tag);
+    const old_data = old_zir.instructions.items(.data);
+
+    // TODO: this should be done after all AstGen workers complete, to avoid
+    // iterating over this full set for every updated file.
+    for (zcu.intern_pool.tracked_insts.keys(), 0..) |*ti, idx_raw| {
+        const ti_idx: InternPool.TrackedInst.Index = @enumFromInt(idx_raw);
+        if (ti.file != file_index) continue;
+        const old_inst = ti.inst;
+        ti.inst = inst_map.get(ti.inst) orelse {
+            // Tracking failed for this instruction. Invalidate associated `src_hash` deps.
+            zcu.comp.mutex.lock();
+            defer zcu.comp.mutex.unlock();
+            log.debug("tracking failed for %{d}", .{old_inst});
+            try zcu.markDependeeOutdated(.{ .src_hash = ti_idx });
+            continue;
+        };
+
+        if (old_zir.getAssociatedSrcHash(old_inst)) |old_hash| hash_changed: {
+            if (new_zir.getAssociatedSrcHash(ti.inst)) |new_hash| {
+                if (std.zig.srcHashEql(old_hash, new_hash)) {
+                    break :hash_changed;
+                }
+                log.debug("hash for (%{d} -> %{d}) changed: {} -> {}", .{
+                    old_inst,
+                    ti.inst,
+                    std.fmt.fmtSliceHexLower(&old_hash),
+                    std.fmt.fmtSliceHexLower(&new_hash),
+                });
+            }
+            // The source hash associated with this instruction changed - invalidate relevant dependencies.
+            zcu.comp.mutex.lock();
+            defer zcu.comp.mutex.unlock();
+            try zcu.markDependeeOutdated(.{ .src_hash = ti_idx });
+        }
+
+        // If this is a `struct_decl` etc, we must invalidate any outdated namespace dependencies.
+        const has_namespace = switch (old_tag[@intFromEnum(old_inst)]) {
+            .extended => switch (old_data[@intFromEnum(old_inst)].extended.opcode) {
+                .struct_decl, .union_decl, .opaque_decl, .enum_decl => true,
+                else => false,
+            },
+            else => false,
+        };
+        if (!has_namespace) continue;
+
+        var old_names: std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void) = .{};
+        defer old_names.deinit(zcu.gpa);
+        {
+            var it = old_zir.declIterator(old_inst);
+            while (it.next()) |decl_inst| {
+                const decl_name = old_zir.getDeclaration(decl_inst)[0].name;
+                switch (decl_name) {
+                    .@"comptime", .@"usingnamespace", .unnamed_test, .decltest => continue,
+                    _ => if (decl_name.isNamedTest(old_zir)) continue,
+                }
+                const name_zir = decl_name.toString(old_zir).?;
+                const name_ip = try zcu.intern_pool.getOrPutString(
+                    zcu.gpa,
+                    pt.tid,
+                    old_zir.nullTerminatedString(name_zir),
+                    .no_embedded_nulls,
+                );
+                try old_names.put(zcu.gpa, name_ip, {});
+            }
+        }
+        var any_change = false;
+        {
+            var it = new_zir.declIterator(ti.inst);
+            while (it.next()) |decl_inst| {
+                const decl_name = old_zir.getDeclaration(decl_inst)[0].name;
+                switch (decl_name) {
+                    .@"comptime", .@"usingnamespace", .unnamed_test, .decltest => continue,
+                    _ => if (decl_name.isNamedTest(old_zir)) continue,
+                }
+                const name_zir = decl_name.toString(old_zir).?;
+                const name_ip = try zcu.intern_pool.getOrPutString(
+                    zcu.gpa,
+                    pt.tid,
+                    old_zir.nullTerminatedString(name_zir),
+                    .no_embedded_nulls,
+                );
+                if (!old_names.swapRemove(name_ip)) continue;
+                // Name added
+                any_change = true;
+                zcu.comp.mutex.lock();
+                defer zcu.comp.mutex.unlock();
+                try zcu.markDependeeOutdated(.{ .namespace_name = .{
+                    .namespace = ti_idx,
+                    .name = name_ip,
+                } });
+            }
+        }
+        // The only elements remaining in `old_names` now are any names which were removed.
+        for (old_names.keys()) |name_ip| {
+            any_change = true;
+            zcu.comp.mutex.lock();
+            defer zcu.comp.mutex.unlock();
+            try zcu.markDependeeOutdated(.{ .namespace_name = .{
+                .namespace = ti_idx,
+                .name = name_ip,
+            } });
+        }
+
+        if (any_change) {
+            zcu.comp.mutex.lock();
+            defer zcu.comp.mutex.unlock();
+            try zcu.markDependeeOutdated(.{ .namespace = ti_idx });
+        }
+    }
+}
+
 /// Like `ensureDeclAnalyzed`, but the Decl is a file's root Decl.
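The relocated `updateZirRefs` shows the concurrency discipline the whole patch is building toward: per-thread work such as string interning passes `pt.tid`, while mutations of shared `Zcu` state are always bracketed by the `Compilation` mutex. A minimal sketch of that locking pattern, lifted from the calls in the hunk above (`invalidate` is a hypothetical wrapper):

```zig
// Hypothetical wrapper: interning happens lock-free per thread, but marking
// a dependee outdated touches shared incremental-compilation state, so it
// must hold comp.mutex for the duration of the mutation.
fn invalidate(pt: Zcu.PerThread, dependee: InternPool.Dependee) !void {
    const zcu = pt.zcu;
    zcu.comp.mutex.lock();
    defer zcu.comp.mutex.unlock();
    try zcu.markDependeeOutdated(dependee);
}
```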
pub fn ensureFileAnalyzed(pt: Zcu.PerThread, file_index: Zcu.File.Index) Zcu.SemaError!void { if (pt.zcu.fileRootDecl(file_index).unwrap()) |existing_root| { @@ -91,7 +496,7 @@ pub fn ensureDeclAnalyzed(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) Zcu.Sem }; } - const decl_prog_node = mod.sema_prog_node.start((try decl.fullyQualifiedName(mod)).toSlice(ip), 0); + const decl_prog_node = mod.sema_prog_node.start((try decl.fullyQualifiedName(pt)).toSlice(ip), 0); defer decl_prog_node.end(); break :blk pt.semaDecl(decl_index) catch |err| switch (err) { @@ -290,7 +695,7 @@ pub fn linkerUpdateFunc(pt: Zcu.PerThread, func_index: InternPool.Index, air: Ai defer liveness.deinit(gpa); if (build_options.enable_debug_extensions and comp.verbose_air) { - const fqn = try decl.fullyQualifiedName(zcu); + const fqn = try decl.fullyQualifiedName(pt); std.debug.print("# Begin Function AIR: {}:\n", .{fqn.fmt(ip)}); @import("../print_air.zig").dump(pt, air, liveness); std.debug.print("# End Function AIR: {}\n\n", .{fqn.fmt(ip)}); @@ -324,7 +729,7 @@ pub fn linkerUpdateFunc(pt: Zcu.PerThread, func_index: InternPool.Index, air: Ai }; } - const codegen_prog_node = zcu.codegen_prog_node.start((try decl.fullyQualifiedName(zcu)).toSlice(ip), 0); + const codegen_prog_node = zcu.codegen_prog_node.start((try decl.fullyQualifiedName(pt)).toSlice(ip), 0); defer codegen_prog_node.end(); if (!air.typesFullyResolved(zcu)) { @@ -434,7 +839,7 @@ fn getFileRootStruct( decl.owns_tv = true; decl.analysis = .complete; - try zcu.scanNamespace(namespace_index, decls, decl); + try pt.scanNamespace(namespace_index, decls, decl); try zcu.comp.work_queue.writeItem(.{ .resolve_type_fully = wip_ty.index }); return wip_ty.finish(ip, decl_index, namespace_index.toOptional()); } @@ -502,7 +907,7 @@ fn semaFileUpdate(pt: Zcu.PerThread, file_index: Zcu.File.Index, type_outdated: const decls = file.zir.bodySlice(extra_index, decls_len); if (!type_outdated) { - try zcu.scanNamespace(decl.src_namespace, decls, decl); + try pt.scanNamespace(decl.src_namespace, decls, decl); } return false; @@ -539,7 +944,7 @@ fn semaFile(pt: Zcu.PerThread, file_index: Zcu.File.Index) Zcu.SemaError!void { zcu.setFileRootDecl(file_index, new_decl_index.toOptional()); zcu.namespacePtr(new_namespace_index).decl_index = new_decl_index; - new_decl.name = try file.fullyQualifiedName(zcu); + new_decl.name = try file.fullyQualifiedName(pt); new_decl.name_fully_qualified = true; new_decl.is_pub = true; new_decl.is_exported = false; @@ -601,9 +1006,9 @@ fn semaDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) !Zcu.SemaDeclResult { } log.debug("semaDecl '{d}'", .{@intFromEnum(decl_index)}); - log.debug("decl name '{}'", .{(try decl.fullyQualifiedName(zcu)).fmt(ip)}); + log.debug("decl name '{}'", .{(try decl.fullyQualifiedName(pt)).fmt(ip)}); defer blk: { - log.debug("finish decl name '{}'", .{(decl.fullyQualifiedName(zcu) catch break :blk).fmt(ip)}); + log.debug("finish decl name '{}'", .{(decl.fullyQualifiedName(pt) catch break :blk).fmt(ip)}); } const old_has_tv = decl.has_tv; @@ -631,7 +1036,7 @@ fn semaDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) !Zcu.SemaDeclResult { const std_file_root_decl_index = zcu.fileRootDecl(std_file_imported.file_index); const std_decl = zcu.declPtr(std_file_root_decl_index.unwrap().?); const std_namespace = std_decl.getInnerNamespace(zcu).?; - const builtin_str = try ip.getOrPutString(gpa, "builtin", .no_embedded_nulls); + const builtin_str = try ip.getOrPutString(gpa, pt.tid, "builtin", .no_embedded_nulls); const builtin_decl = 
zcu.declPtr(std_namespace.decls.getKeyAdapted(builtin_str, Zcu.DeclAdapter{ .zcu = zcu }) orelse break :ip_index .none); const builtin_namespace = builtin_decl.getInnerNamespaceIndex(zcu).unwrap() orelse break :ip_index .none; if (decl.src_namespace != builtin_namespace) break :ip_index .none; @@ -802,7 +1207,7 @@ fn semaDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) !Zcu.SemaDeclResult { } else if (bytes.len == 0) { return sema.fail(&block_scope, section_src, "linksection cannot be empty", .{}); } - break :blk try ip.getOrPutStringOpt(gpa, bytes, .no_embedded_nulls); + break :blk try ip.getOrPutStringOpt(gpa, pt.tid, bytes, .no_embedded_nulls); }; decl.@"addrspace" = blk: { const addrspace_ctx: Sema.AddressSpaceContext = switch (ip.indexToKey(decl_val.toIntern())) { @@ -996,7 +1401,7 @@ fn newEmbedFile( } }); const array_val = try pt.intern(.{ .aggregate = .{ .ty = array_ty, - .storage = .{ .bytes = try ip.getOrPutTrailingString(gpa, bytes.len, .maybe_embedded_nulls) }, + .storage = .{ .bytes = try ip.getOrPutTrailingString(gpa, pt.tid, bytes.len, .maybe_embedded_nulls) }, } }); const ptr_ty = (try pt.ptrType(.{ @@ -1018,7 +1423,7 @@ fn newEmbedFile( result.* = new_file; new_file.* = .{ - .sub_file_path = try ip.getOrPutString(gpa, sub_file_path, .no_embedded_nulls), + .sub_file_path = try ip.getOrPutString(gpa, pt.tid, sub_file_path, .no_embedded_nulls), .owner = pkg, .stat = stat, .val = ptr_val, @@ -1027,6 +1432,271 @@ fn newEmbedFile( return ptr_val; } +pub fn scanNamespace( + pt: Zcu.PerThread, + namespace_index: Zcu.Namespace.Index, + decls: []const Zir.Inst.Index, + parent_decl: *Zcu.Decl, +) Allocator.Error!void { + const tracy = trace(@src()); + defer tracy.end(); + + const zcu = pt.zcu; + const gpa = zcu.gpa; + const namespace = zcu.namespacePtr(namespace_index); + + // For incremental updates, `scanDecl` wants to look up existing decls by their ZIR index rather + // than their name. We'll build an efficient mapping now, then discard the current `decls`. + var existing_by_inst: std.AutoHashMapUnmanaged(InternPool.TrackedInst.Index, Zcu.Decl.Index) = .{}; + defer existing_by_inst.deinit(gpa); + + try existing_by_inst.ensureTotalCapacity(gpa, @intCast(namespace.decls.count())); + + for (namespace.decls.keys()) |decl_index| { + const decl = zcu.declPtr(decl_index); + existing_by_inst.putAssumeCapacityNoClobber(decl.zir_decl_index.unwrap().?, decl_index); + } + + var seen_decls: std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, void) = .{}; + defer seen_decls.deinit(gpa); + + try zcu.comp.work_queue.ensureUnusedCapacity(decls.len); + + namespace.decls.clearRetainingCapacity(); + try namespace.decls.ensureTotalCapacity(gpa, decls.len); + + namespace.usingnamespace_set.clearRetainingCapacity(); + + var scan_decl_iter: ScanDeclIter = .{ + .pt = pt, + .namespace_index = namespace_index, + .parent_decl = parent_decl, + .seen_decls = &seen_decls, + .existing_by_inst = &existing_by_inst, + .pass = .named, + }; + for (decls) |decl_inst| { + try scan_decl_iter.scanDecl(decl_inst); + } + scan_decl_iter.pass = .unnamed; + for (decls) |decl_inst| { + try scan_decl_iter.scanDecl(decl_inst); + } + + if (seen_decls.count() != namespace.decls.count()) { + // Do a pass over the namespace contents and remove any decls from the last update + // which were removed in this one. 
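+        // (`seen_decls` holds every name produced by the two scan passes above, so
+        // a previously-existing decl whose name is absent from it was deleted from
+        // the source. `orderedRemoveAt` is O(n) per removal, but decl order is
+        // observable through `@typeInfo` and must be preserved.)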
+        var i: usize = 0;
+        while (i < namespace.decls.count()) {
+            const decl_index = namespace.decls.keys()[i];
+            const decl = zcu.declPtr(decl_index);
+            if (!seen_decls.contains(decl.name)) {
+                // We must preserve namespace ordering for @typeInfo.
+                namespace.decls.orderedRemoveAt(i);
+            } else {
+                i += 1;
+            }
+        }
+    }
+}
+
+const ScanDeclIter = struct {
+    pt: Zcu.PerThread,
+    namespace_index: Zcu.Namespace.Index,
+    parent_decl: *Zcu.Decl,
+    seen_decls: *std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, void),
+    existing_by_inst: *const std.AutoHashMapUnmanaged(InternPool.TrackedInst.Index, Zcu.Decl.Index),
+    /// Decl scanning is run in two passes, so that we can detect when a generated
+    /// name would clash with an explicit name and use a different one.
+    pass: enum { named, unnamed },
+    usingnamespace_index: usize = 0,
+    comptime_index: usize = 0,
+    unnamed_test_index: usize = 0,
+
+    fn avoidNameConflict(iter: *ScanDeclIter, comptime fmt: []const u8, args: anytype) !InternPool.NullTerminatedString {
+        const pt = iter.pt;
+        const gpa = pt.zcu.gpa;
+        const ip = &pt.zcu.intern_pool;
+        var name = try ip.getOrPutStringFmt(gpa, pt.tid, fmt, args, .no_embedded_nulls);
+        var gop = try iter.seen_decls.getOrPut(gpa, name);
+        var next_suffix: u32 = 0;
+        while (gop.found_existing) {
+            name = try ip.getOrPutStringFmt(gpa, pt.tid, "{}_{d}", .{ name.fmt(ip), next_suffix }, .no_embedded_nulls);
+            gop = try iter.seen_decls.getOrPut(gpa, name);
+            next_suffix += 1;
+        }
+        return name;
+    }
+
+    fn scanDecl(iter: *ScanDeclIter, decl_inst: Zir.Inst.Index) Allocator.Error!void {
+        const tracy = trace(@src());
+        defer tracy.end();
+
+        const pt = iter.pt;
+        const zcu = pt.zcu;
+        const namespace_index = iter.namespace_index;
+        const namespace = zcu.namespacePtr(namespace_index);
+        const gpa = zcu.gpa;
+        const zir = namespace.fileScope(zcu).zir;
+        const ip = &zcu.intern_pool;
+
+        const inst_data = zir.instructions.items(.data)[@intFromEnum(decl_inst)].declaration;
+        const extra = zir.extraData(Zir.Inst.Declaration, inst_data.payload_index);
+        const declaration = extra.data;
+
+        // Every Decl needs a name.
+        const decl_name: InternPool.NullTerminatedString, const kind: Zcu.Decl.Kind, const is_named_test: bool = switch (declaration.name) {
+            .@"comptime" => info: {
+                if (iter.pass != .unnamed) return;
+                const i = iter.comptime_index;
+                iter.comptime_index += 1;
+                break :info .{
+                    try iter.avoidNameConflict("comptime_{d}", .{i}),
+                    .@"comptime",
+                    false,
+                };
+            },
+            .@"usingnamespace" => info: {
+                // TODO: this isn't right! These should be considered unnamed. Name conflicts can happen here.
+                // The problem is, we need to preserve the decl ordering for `@typeInfo`.
+                // I'm not bothering to fix this now, since some upcoming changes will change this code significantly anyway.
+                if (iter.pass != .named) return;
+                const i = iter.usingnamespace_index;
+                iter.usingnamespace_index += 1;
+                break :info .{
+                    try iter.avoidNameConflict("usingnamespace_{d}", .{i}),
+                    .@"usingnamespace",
+                    false,
+                };
+            },
+            .unnamed_test => info: {
+                if (iter.pass != .unnamed) return;
+                const i = iter.unnamed_test_index;
+                iter.unnamed_test_index += 1;
+                break :info .{
+                    try iter.avoidNameConflict("test_{d}", .{i}),
+                    .@"test",
+                    false,
+                };
+            },
+            .decltest => info: {
+                // We consider these to be unnamed since the decl name can be adjusted to avoid conflicts if necessary.
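+                // For example, a doctest attached to a decl `foo` is named
+                // `decltest.foo`; should that collide, `avoidNameConflict` appends
+                // a counter, yielding `decltest.foo_0`, `decltest.foo_1`, and so on.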
+ if (iter.pass != .unnamed) return; + assert(declaration.flags.has_doc_comment); + const name = zir.nullTerminatedString(@enumFromInt(zir.extra[extra.end])); + break :info .{ + try iter.avoidNameConflict("decltest.{s}", .{name}), + .@"test", + true, + }; + }, + _ => if (declaration.name.isNamedTest(zir)) info: { + // We consider these to be unnamed since the decl name can be adjusted to avoid conflicts if necessary. + if (iter.pass != .unnamed) return; + break :info .{ + try iter.avoidNameConflict("test.{s}", .{zir.nullTerminatedString(declaration.name.toString(zir).?)}), + .@"test", + true, + }; + } else info: { + if (iter.pass != .named) return; + const name = try ip.getOrPutString( + gpa, + pt.tid, + zir.nullTerminatedString(declaration.name.toString(zir).?), + .no_embedded_nulls, + ); + try iter.seen_decls.putNoClobber(gpa, name, {}); + break :info .{ + name, + .named, + false, + }; + }, + }; + + switch (kind) { + .@"usingnamespace" => try namespace.usingnamespace_set.ensureUnusedCapacity(gpa, 1), + .@"test" => try zcu.test_functions.ensureUnusedCapacity(gpa, 1), + else => {}, + } + + const parent_file_scope_index = iter.parent_decl.getFileScopeIndex(zcu); + const tracked_inst = try ip.trackZir(gpa, parent_file_scope_index, decl_inst); + + // We create a Decl for it regardless of analysis status. + + const prev_exported, const decl_index = if (iter.existing_by_inst.get(tracked_inst)) |decl_index| decl_index: { + // We need only update this existing Decl. + const decl = zcu.declPtr(decl_index); + const was_exported = decl.is_exported; + assert(decl.kind == kind); // ZIR tracking should preserve this + decl.name = decl_name; + decl.is_pub = declaration.flags.is_pub; + decl.is_exported = declaration.flags.is_export; + break :decl_index .{ was_exported, decl_index }; + } else decl_index: { + // Create and set up a new Decl. + const new_decl_index = try zcu.allocateNewDecl(namespace_index); + const new_decl = zcu.declPtr(new_decl_index); + new_decl.kind = kind; + new_decl.name = decl_name; + new_decl.is_pub = declaration.flags.is_pub; + new_decl.is_exported = declaration.flags.is_export; + new_decl.zir_decl_index = tracked_inst.toOptional(); + break :decl_index .{ false, new_decl_index }; + }; + + const decl = zcu.declPtr(decl_index); + + namespace.decls.putAssumeCapacityNoClobberContext(decl_index, {}, .{ .zcu = zcu }); + + const comp = zcu.comp; + const decl_mod = namespace.fileScope(zcu).mod; + const want_analysis = declaration.flags.is_export or switch (kind) { + .anon => unreachable, + .@"comptime" => true, + .@"usingnamespace" => a: { + namespace.usingnamespace_set.putAssumeCapacityNoClobber(decl_index, declaration.flags.is_pub); + break :a true; + }, + .named => false, + .@"test" => a: { + if (!comp.config.is_test) break :a false; + if (decl_mod != zcu.main_mod) break :a false; + if (is_named_test and comp.test_filters.len > 0) { + const decl_fqn = try namespace.fullyQualifiedName(pt, decl_name); + const decl_fqn_slice = decl_fqn.toSlice(ip); + for (comp.test_filters) |test_filter| { + if (std.mem.indexOf(u8, decl_fqn_slice, test_filter)) |_| break; + } else break :a false; + } + zcu.test_functions.putAssumeCapacity(decl_index, {}); // may clobber on incremental update + break :a true; + }, + }; + + if (want_analysis) { + // We will not queue analysis if the decl has been analyzed on a previous update and + // `is_export` is unchanged. In this case, the incremental update mechanism will handle + // re-analysis for us if necessary. 
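+            // For example, changing `export fn f() void {}` to `fn f() void {}`
+            // re-queues analysis of `f` even though its body is unchanged, since
+            // export processing happens during decl analysis.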
+            if (prev_exported != declaration.flags.is_export or decl.analysis == .unreferenced) {
+                log.debug("scanDecl queue analyze_decl file='{s}' decl_name='{}' decl_index={d}", .{
+                    namespace.fileScope(zcu).sub_file_path, decl_name.fmt(ip), @intFromEnum(decl_index),
+                });
+                comp.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = decl_index });
+            }
+        }
+
+        if (decl.getOwnedFunction(zcu) != null) {
+            // TODO this logic is insufficient; namespaces we don't re-scan may still require
+            // updated line numbers. Look into this!
+            // TODO Look into detecting when this would be unnecessary by storing enough state
+            // in `Decl` to notice that the line number did not change.
+            comp.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl_index });
+        }
+    }
+};
+
 pub fn analyzeFnBody(pt: Zcu.PerThread, func_index: InternPool.Index, arena: Allocator) Zcu.SemaError!Air {
     const tracy = trace(@src());
     defer tracy.end();
@@ -1038,12 +1708,12 @@ pub fn analyzeFnBody(pt: Zcu.PerThread, func_index: InternPool.Index, arena: All
     const decl_index = func.owner_decl;
     const decl = mod.declPtr(decl_index);
 
-    log.debug("func name '{}'", .{(try decl.fullyQualifiedName(mod)).fmt(ip)});
+    log.debug("func name '{}'", .{(try decl.fullyQualifiedName(pt)).fmt(ip)});
     defer blk: {
-        log.debug("finish func name '{}'", .{(decl.fullyQualifiedName(mod) catch break :blk).fmt(ip)});
+        log.debug("finish func name '{}'", .{(decl.fullyQualifiedName(pt) catch break :blk).fmt(ip)});
    }

-    const decl_prog_node = mod.sema_prog_node.start((try decl.fullyQualifiedName(mod)).toSlice(ip), 0);
+    const decl_prog_node = mod.sema_prog_node.start((try decl.fullyQualifiedName(pt)).toSlice(ip), 0);
     defer decl_prog_node.end();
 
     mod.intern_pool.removeDependenciesForDepender(gpa, InternPool.AnalUnit.wrap(.{ .func = func_index }));
@@ -1273,6 +1943,19 @@ pub fn analyzeFnBody(pt: Zcu.PerThread, func_index: InternPool.Index, arena: All
     };
 }
 
+fn lockAndClearFileCompileError(pt: Zcu.PerThread, file: *Zcu.File) void {
+    switch (file.status) {
+        .success_zir, .retryable_failure => {},
+        .never_loaded, .parse_failure, .astgen_failure => {
+            pt.zcu.comp.mutex.lock();
+            defer pt.zcu.comp.mutex.unlock();
+            if (pt.zcu.failed_files.fetchSwapRemove(file)) |kv| {
+                if (kv.value) |msg| msg.destroy(pt.zcu.gpa); // Delete previous error message.
+            }
+        },
+    }
+}
+
 /// Called from `Compilation.update`, after everything is done, just before
 /// reporting compile errors. In this function we emit exported symbol collision
 /// errors and communicate exported symbols to the linker backend.
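 /// (Export collisions can only be detected this late because `@export` targets
 /// are discovered during semantic analysis; e.g. two decls both exported under
 /// the name "main" are reported here rather than at ZIR time.)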
@@ -1397,7 +2080,7 @@ pub fn populateTestFunctions( const root_decl_index = zcu.fileRootDecl(builtin_file_index); const root_decl = zcu.declPtr(root_decl_index.unwrap().?); const builtin_namespace = zcu.namespacePtr(root_decl.src_namespace); - const test_functions_str = try ip.getOrPutString(gpa, "test_functions", .no_embedded_nulls); + const test_functions_str = try ip.getOrPutString(gpa, pt.tid, "test_functions", .no_embedded_nulls); const decl_index = builtin_namespace.decls.getKeyAdapted( test_functions_str, Zcu.DeclAdapter{ .zcu = zcu }, @@ -1424,7 +2107,7 @@ pub fn populateTestFunctions( for (test_fn_vals, zcu.test_functions.keys()) |*test_fn_val, test_decl_index| { const test_decl = zcu.declPtr(test_decl_index); - const test_decl_name = try test_decl.fullyQualifiedName(zcu); + const test_decl_name = try test_decl.fullyQualifiedName(pt); const test_decl_name_len = test_decl_name.length(ip); const test_name_anon_decl: InternPool.Key.Ptr.BaseAddr.AnonDecl = n: { const test_name_ty = try pt.arrayType(.{ @@ -1530,7 +2213,7 @@ pub fn linkerUpdateDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) !void { const decl = zcu.declPtr(decl_index); - const codegen_prog_node = zcu.codegen_prog_node.start((try decl.fullyQualifiedName(zcu)).toSlice(&zcu.intern_pool), 0); + const codegen_prog_node = zcu.codegen_prog_node.start((try decl.fullyQualifiedName(pt)).toSlice(&zcu.intern_pool), 0); defer codegen_prog_node.end(); if (comp.bin_file) |lf| { @@ -2064,11 +2747,11 @@ pub fn getBuiltinDecl(pt: Zcu.PerThread, name: []const u8) Allocator.Error!Inter const std_file_imported = zcu.importPkg(zcu.std_mod) catch @panic("failed to import lib/std.zig"); const std_file_root_decl = zcu.fileRootDecl(std_file_imported.file_index).unwrap().?; const std_namespace = zcu.declPtr(std_file_root_decl).getOwnedInnerNamespace(zcu).?; - const builtin_str = try ip.getOrPutString(gpa, "builtin", .no_embedded_nulls); + const builtin_str = try ip.getOrPutString(gpa, pt.tid, "builtin", .no_embedded_nulls); const builtin_decl = std_namespace.decls.getKeyAdapted(builtin_str, Zcu.DeclAdapter{ .zcu = zcu }) orelse @panic("lib/std.zig is corrupt and missing 'builtin'"); pt.ensureDeclAnalyzed(builtin_decl) catch @panic("std.builtin is corrupt"); const builtin_namespace = zcu.declPtr(builtin_decl).getInnerNamespace(zcu) orelse @panic("std.builtin is corrupt"); - const name_str = try ip.getOrPutString(gpa, name, .no_embedded_nulls); + const name_str = try ip.getOrPutString(gpa, pt.tid, name, .no_embedded_nulls); return builtin_namespace.decls.getKeyAdapted(name_str, Zcu.DeclAdapter{ .zcu = zcu }) orelse @panic("lib/std/builtin.zig is corrupt"); } @@ -2082,6 +2765,8 @@ pub fn getBuiltinType(pt: Zcu.PerThread, name: []const u8) Allocator.Error!Type const Air = @import("../Air.zig"); const Allocator = std.mem.Allocator; const assert = std.debug.assert; +const Ast = std.zig.Ast; +const AstGen = std.zig.AstGen; const BigIntConst = std.math.big.int.Const; const BigIntMutable = std.math.big.int.Mutable; const build_options = @import("build_options"); diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index a8ac674e07..8873c5cb1b 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -2204,14 +2204,14 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif const func_val = (try func.air.value(pl_op.operand, pt)) orelse break :blk null; if (func_val.getFunction(mod)) |function| { - _ = try func.bin_file.getOrCreateAtomForDecl(function.owner_decl); + _ = try 
func.bin_file.getOrCreateAtomForDecl(pt, function.owner_decl); break :blk function.owner_decl; } else if (func_val.getExternFunc(mod)) |extern_func| { const ext_decl = mod.declPtr(extern_func.decl); const ext_info = mod.typeToFunc(ext_decl.typeOf(mod)).?; var func_type = try genFunctype(func.gpa, ext_info.cc, ext_info.param_types.get(ip), Type.fromInterned(ext_info.return_type), pt); defer func_type.deinit(func.gpa); - const atom_index = try func.bin_file.getOrCreateAtomForDecl(extern_func.decl); + const atom_index = try func.bin_file.getOrCreateAtomForDecl(pt, extern_func.decl); const atom = func.bin_file.getAtomPtr(atom_index); const type_index = try func.bin_file.storeDeclType(extern_func.decl, func_type); try func.bin_file.addOrUpdateImport( @@ -2224,7 +2224,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif } else switch (mod.intern_pool.indexToKey(func_val.ip_index)) { .ptr => |ptr| if (ptr.byte_offset == 0) switch (ptr.base_addr) { .decl => |decl| { - _ = try func.bin_file.getOrCreateAtomForDecl(decl); + _ = try func.bin_file.getOrCreateAtomForDecl(pt, decl); break :blk decl; }, else => {}, @@ -3227,7 +3227,7 @@ fn lowerDeclRefValue(func: *CodeGen, decl_index: InternPool.DeclIndex, offset: u return WValue{ .imm32 = 0xaaaaaaaa }; } - const atom_index = try func.bin_file.getOrCreateAtomForDecl(decl_index); + const atom_index = try func.bin_file.getOrCreateAtomForDecl(pt, decl_index); const atom = func.bin_file.getAtom(atom_index); const target_sym_index = @intFromEnum(atom.sym_index); @@ -7284,7 +7284,7 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 { defer arena_allocator.deinit(); const arena = arena_allocator.allocator(); - const fqn = try mod.declPtr(enum_decl_index).fullyQualifiedName(mod); + const fqn = try mod.declPtr(enum_decl_index).fullyQualifiedName(pt); const func_name = try std.fmt.allocPrintZ(arena, "__zig_tag_name_{}", .{fqn.fmt(ip)}); // check if we already generated code for this. 
diff --git a/src/codegen.zig b/src/codegen.zig index 5fc8ef174f..0513682d73 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -756,7 +756,7 @@ fn lowerDeclRef( return Result.ok; } - const vaddr = try lf.getDeclVAddr(decl_index, .{ + const vaddr = try lf.getDeclVAddr(pt, decl_index, .{ .parent_atom_index = reloc_info.parent_atom_index, .offset = code.items.len, .addend = @intCast(offset), diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index ca574070bf..0f13c9fd9b 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1744,7 +1744,7 @@ pub const Object = struct { if (export_indices.len != 0) { return updateExportedGlobal(self, zcu, global_index, export_indices); } else { - const fqn = try self.builder.strtabString((try decl.fullyQualifiedName(zcu)).toSlice(ip)); + const fqn = try self.builder.strtabString((try decl.fullyQualifiedName(pt)).toSlice(ip)); try global_index.rename(fqn, &self.builder); global_index.setLinkage(.internal, &self.builder); if (comp.config.dll_export_fns) @@ -2520,7 +2520,7 @@ pub const Object = struct { const field_offset = ty.structFieldOffset(field_index, pt); const field_name = struct_type.fieldName(ip, field_index).unwrap() orelse - try ip.getOrPutStringFmt(gpa, "{d}", .{field_index}, .no_embedded_nulls); + try ip.getOrPutStringFmt(gpa, pt.tid, "{d}", .{field_index}, .no_embedded_nulls); fields.appendAssumeCapacity(try o.builder.debugMemberType( try o.builder.metadataString(field_name.toSlice(ip)), @@ -2807,17 +2807,18 @@ pub const Object = struct { } fn getStackTraceType(o: *Object) Allocator.Error!Type { - const zcu = o.pt.zcu; + const pt = o.pt; + const zcu = pt.zcu; const std_mod = zcu.std_mod; const std_file_imported = zcu.importPkg(std_mod) catch unreachable; - const builtin_str = try zcu.intern_pool.getOrPutString(zcu.gpa, "builtin", .no_embedded_nulls); + const builtin_str = try zcu.intern_pool.getOrPutString(zcu.gpa, pt.tid, "builtin", .no_embedded_nulls); const std_file_root_decl = zcu.fileRootDecl(std_file_imported.file_index); const std_namespace = zcu.namespacePtr(zcu.declPtr(std_file_root_decl.unwrap().?).src_namespace); const builtin_decl = std_namespace.decls.getKeyAdapted(builtin_str, Zcu.DeclAdapter{ .zcu = zcu }).?; - const stack_trace_str = try zcu.intern_pool.getOrPutString(zcu.gpa, "StackTrace", .no_embedded_nulls); + const stack_trace_str = try zcu.intern_pool.getOrPutString(zcu.gpa, pt.tid, "StackTrace", .no_embedded_nulls); // buffer is only used for int_type, `builtin` is a struct. 
const builtin_ty = zcu.declPtr(builtin_decl).val.toType(); const builtin_namespace = zcu.namespacePtrUnwrap(builtin_ty.getNamespaceIndex(zcu)).?; @@ -2865,7 +2866,7 @@ pub const Object = struct { try o.builder.strtabString((if (is_extern) decl.name else - try decl.fullyQualifiedName(zcu)).toSlice(ip)), + try decl.fullyQualifiedName(pt)).toSlice(ip)), toLlvmAddressSpace(decl.@"addrspace", target), ); gop.value_ptr.* = function_index.ptrConst(&o.builder).global; @@ -3074,7 +3075,8 @@ pub const Object = struct { if (gop.found_existing) return gop.value_ptr.ptr(&o.builder).kind.variable; errdefer assert(o.decl_map.remove(decl_index)); - const zcu = o.pt.zcu; + const pt = o.pt; + const zcu = pt.zcu; const decl = zcu.declPtr(decl_index); const is_extern = decl.isExtern(zcu); @@ -3082,7 +3084,7 @@ pub const Object = struct { try o.builder.strtabString((if (is_extern) decl.name else - try decl.fullyQualifiedName(zcu)).toSlice(&zcu.intern_pool)), + try decl.fullyQualifiedName(pt)).toSlice(&zcu.intern_pool)), try o.lowerType(decl.typeOf(zcu)), toLlvmGlobalAddressSpace(decl.@"addrspace", zcu.getTarget()), ); @@ -3310,7 +3312,7 @@ pub const Object = struct { return int_ty; } - const fqn = try mod.declPtr(struct_type.decl.unwrap().?).fullyQualifiedName(mod); + const fqn = try mod.declPtr(struct_type.decl.unwrap().?).fullyQualifiedName(pt); var llvm_field_types = std.ArrayListUnmanaged(Builder.Type){}; defer llvm_field_types.deinit(o.gpa); @@ -3464,7 +3466,7 @@ pub const Object = struct { return enum_tag_ty; } - const fqn = try mod.declPtr(union_obj.decl).fullyQualifiedName(mod); + const fqn = try mod.declPtr(union_obj.decl).fullyQualifiedName(pt); const aligned_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[layout.most_aligned_field]); const aligned_field_llvm_ty = try o.lowerType(aligned_field_ty); @@ -3525,7 +3527,7 @@ pub const Object = struct { const gop = try o.type_map.getOrPut(o.gpa, t.toIntern()); if (!gop.found_existing) { const decl = mod.declPtr(ip.loadOpaqueType(t.toIntern()).decl); - const fqn = try decl.fullyQualifiedName(mod); + const fqn = try decl.fullyQualifiedName(pt); gop.value_ptr.* = try o.builder.opaqueType(try o.builder.string(fqn.toSlice(ip))); } return gop.value_ptr.*; @@ -4585,7 +4587,7 @@ pub const Object = struct { const usize_ty = try o.lowerType(Type.usize); const ret_ty = try o.lowerType(Type.slice_const_u8_sentinel_0); - const fqn = try zcu.declPtr(enum_type.decl).fullyQualifiedName(zcu); + const fqn = try zcu.declPtr(enum_type.decl).fullyQualifiedName(pt); const target = zcu.root_mod.resolved_target.result; const function_index = try o.builder.addFunction( try o.builder.fnType(ret_ty, &.{try o.lowerType(Type.fromInterned(enum_type.tag_ty))}, .normal), @@ -5173,7 +5175,7 @@ pub const FuncGen = struct { const line_number = decl.navSrcLine(zcu) + 1; self.inlined = self.wip.debug_location; - const fqn = try decl.fullyQualifiedName(zcu); + const fqn = try decl.fullyQualifiedName(pt); const fn_ty = try pt.funcType(.{ .param_types = &.{}, @@ -9707,7 +9709,7 @@ pub const FuncGen = struct { if (gop.found_existing) return gop.value_ptr.*; errdefer assert(o.named_enum_map.remove(enum_type.decl)); - const fqn = try zcu.declPtr(enum_type.decl).fullyQualifiedName(zcu); + const fqn = try zcu.declPtr(enum_type.decl).fullyQualifiedName(pt); const target = zcu.root_mod.resolved_target.result; const function_index = try o.builder.addFunction( try o.builder.fnType(.i1, &.{try o.lowerType(Type.fromInterned(enum_type.tag_ty))}, .normal), diff --git a/src/codegen/spirv.zig 
b/src/codegen/spirv.zig index 95874a5d65..92cff8b2d0 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -1753,7 +1753,7 @@ const DeclGen = struct { } const field_name = struct_type.fieldName(ip, field_index).unwrap() orelse - try ip.getOrPutStringFmt(mod.gpa, "{d}", .{field_index}, .no_embedded_nulls); + try ip.getOrPutStringFmt(mod.gpa, pt.tid, "{d}", .{field_index}, .no_embedded_nulls); try member_types.append(try self.resolveType(field_ty, .indirect)); try member_names.append(field_name.toSlice(ip)); } @@ -3012,7 +3012,7 @@ const DeclGen = struct { // Append the actual code into the functions section. try self.spv.addFunction(spv_decl_index, self.func); - const fqn = try decl.fullyQualifiedName(self.pt.zcu); + const fqn = try decl.fullyQualifiedName(self.pt); try self.spv.debugName(result_id, fqn.toSlice(ip)); // Temporarily generate a test kernel declaration if this is a test function. @@ -3041,7 +3041,7 @@ const DeclGen = struct { .storage_class = final_storage_class, }); - const fqn = try decl.fullyQualifiedName(self.pt.zcu); + const fqn = try decl.fullyQualifiedName(self.pt); try self.spv.debugName(result_id, fqn.toSlice(ip)); try self.spv.declareDeclDeps(spv_decl_index, &.{}); }, @@ -3086,7 +3086,7 @@ const DeclGen = struct { try self.func.body.emit(self.spv.gpa, .OpFunctionEnd, {}); try self.spv.addFunction(spv_decl_index, self.func); - const fqn = try decl.fullyQualifiedName(self.pt.zcu); + const fqn = try decl.fullyQualifiedName(self.pt); try self.spv.debugNameFmt(initializer_id, "initializer of {}", .{fqn.fmt(ip)}); try self.spv.sections.types_globals_constants.emit(self.spv.gpa, .OpExtInst, .{ diff --git a/src/link.zig b/src/link.zig index db19a16d4d..f407ad2f4c 100644 --- a/src/link.zig +++ b/src/link.zig @@ -424,14 +424,14 @@ pub const File = struct { } } - pub fn updateDeclLineNumber(base: *File, module: *Zcu, decl_index: InternPool.DeclIndex) UpdateDeclError!void { - const decl = module.declPtr(decl_index); + pub fn updateDeclLineNumber(base: *File, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) UpdateDeclError!void { + const decl = pt.zcu.declPtr(decl_index); assert(decl.has_tv); switch (base.tag) { .spirv, .nvptx => {}, inline else => |tag| { if (tag != .c and build_options.only_c) unreachable; - return @as(*tag.Type(), @fieldParentPtr("base", base)).updateDeclLineNumber(module, decl_index); + return @as(*tag.Type(), @fieldParentPtr("base", base)).updateDeclLineNumber(pt, decl_index); }, } } @@ -626,14 +626,14 @@ pub const File = struct { /// `Decl`'s address was not yet resolved, or the containing atom gets moved in virtual memory. /// May be called before or after updateFunc/updateDecl therefore it is up to the linker to allocate /// the block/atom. 
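 /// The `pt` parameter added here is unused by the COFF, ELF, and MachO
 /// implementations (note their `_: Zcu.PerThread` parameters below); Plan9 and
 /// Wasm use it to look up the Decl and, for Wasm, to create its atom on demand.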
- pub fn getDeclVAddr(base: *File, decl_index: InternPool.DeclIndex, reloc_info: RelocInfo) !u64 { + pub fn getDeclVAddr(base: *File, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex, reloc_info: RelocInfo) !u64 { if (build_options.only_c) @compileError("unreachable"); switch (base.tag) { .c => unreachable, .spirv => unreachable, .nvptx => unreachable, inline else => |tag| { - return @as(*tag.Type(), @fieldParentPtr("base", base)).getDeclVAddr(decl_index, reloc_info); + return @as(*tag.Type(), @fieldParentPtr("base", base)).getDeclVAddr(pt, decl_index, reloc_info); }, } } diff --git a/src/link/C.zig b/src/link/C.zig index 3db5952a4c..1a6cee068e 100644 --- a/src/link/C.zig +++ b/src/link/C.zig @@ -383,11 +383,11 @@ pub fn updateDecl(self: *C, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) gop.value_ptr.fwd_decl = try self.addString(object.dg.fwd_decl.items); } -pub fn updateDeclLineNumber(self: *C, zcu: *Zcu, decl_index: InternPool.DeclIndex) !void { +pub fn updateDeclLineNumber(self: *C, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !void { // The C backend does not have the ability to fix line numbers without re-generating // the entire Decl. _ = self; - _ = zcu; + _ = pt; _ = decl_index; } diff --git a/src/link/Coff.zig b/src/link/Coff.zig index 7ef5bde6e6..bd1c96bf8b 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -1176,7 +1176,7 @@ pub fn lowerUnnamedConst(self: *Coff, pt: Zcu.PerThread, val: Value, decl_index: gop.value_ptr.* = .{}; } const unnamed_consts = gop.value_ptr; - const decl_name = try decl.fullyQualifiedName(mod); + const decl_name = try decl.fullyQualifiedName(pt); const index = unnamed_consts.items.len; const sym_name = try std.fmt.allocPrint(gpa, "__unnamed_{}_{d}", .{ decl_name.fmt(&mod.intern_pool), index }); defer gpa.free(sym_name); @@ -1427,7 +1427,7 @@ fn updateDeclCode(self: *Coff, pt: Zcu.PerThread, decl_index: InternPool.DeclInd const mod = pt.zcu; const decl = mod.declPtr(decl_index); - const decl_name = try decl.fullyQualifiedName(mod); + const decl_name = try decl.fullyQualifiedName(pt); log.debug("updateDeclCode {}{*}", .{ decl_name.fmt(&mod.intern_pool), decl }); const required_alignment: u32 = @intCast(decl.getAlignment(pt).toByteUnits() orelse 0); @@ -1855,7 +1855,7 @@ pub fn flushModule(self: *Coff, arena: Allocator, tid: Zcu.PerThread.Id, prog_no assert(!self.imports_count_dirty); } -pub fn getDeclVAddr(self: *Coff, decl_index: InternPool.DeclIndex, reloc_info: link.File.RelocInfo) !u64 { +pub fn getDeclVAddr(self: *Coff, _: Zcu.PerThread, decl_index: InternPool.DeclIndex, reloc_info: link.File.RelocInfo) !u64 { assert(self.llvm_object == null); const this_atom_index = try self.getOrCreateAtomForDecl(decl_index); @@ -1972,9 +1972,9 @@ pub fn getGlobalSymbol(self: *Coff, name: []const u8, lib_name_name: ?[]const u8 return global_index; } -pub fn updateDeclLineNumber(self: *Coff, module: *Module, decl_index: InternPool.DeclIndex) !void { +pub fn updateDeclLineNumber(self: *Coff, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !void { _ = self; - _ = module; + _ = pt; _ = decl_index; log.debug("TODO implement updateDeclLineNumber", .{}); } diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index 80c88666bc..9ae4ee3be6 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -1082,7 +1082,7 @@ pub fn initDeclState(self: *Dwarf, pt: Zcu.PerThread, decl_index: InternPool.Dec defer tracy.end(); const decl = pt.zcu.declPtr(decl_index); - const decl_linkage_name = try decl.fullyQualifiedName(pt.zcu); + const decl_linkage_name 
= try decl.fullyQualifiedName(pt); log.debug("initDeclState {}{*}", .{ decl_linkage_name.fmt(&pt.zcu.intern_pool), decl }); diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 39704d937c..579df0760a 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -543,7 +543,7 @@ pub fn deinit(self: *Elf) void { self.comdat_group_sections.deinit(gpa); } -pub fn getDeclVAddr(self: *Elf, decl_index: InternPool.DeclIndex, reloc_info: link.File.RelocInfo) !u64 { +pub fn getDeclVAddr(self: *Elf, _: Zcu.PerThread, decl_index: InternPool.DeclIndex, reloc_info: link.File.RelocInfo) !u64 { assert(self.llvm_object == null); return self.zigObjectPtr().?.getDeclVAddr(self, decl_index, reloc_info); } @@ -3021,9 +3021,9 @@ pub fn updateExports( return self.zigObjectPtr().?.updateExports(self, pt, exported, export_indices); } -pub fn updateDeclLineNumber(self: *Elf, mod: *Module, decl_index: InternPool.DeclIndex) !void { +pub fn updateDeclLineNumber(self: *Elf, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !void { if (self.llvm_object) |_| return; - return self.zigObjectPtr().?.updateDeclLineNumber(mod, decl_index); + return self.zigObjectPtr().?.updateDeclLineNumber(pt, decl_index); } pub fn deleteExport( diff --git a/src/link/Elf/ZigObject.zig b/src/link/Elf/ZigObject.zig index 8cfa5e701f..7a419750d4 100644 --- a/src/link/Elf/ZigObject.zig +++ b/src/link/Elf/ZigObject.zig @@ -908,7 +908,7 @@ fn updateDeclCode( const gpa = elf_file.base.comp.gpa; const mod = pt.zcu; const decl = mod.declPtr(decl_index); - const decl_name = try decl.fullyQualifiedName(mod); + const decl_name = try decl.fullyQualifiedName(pt); log.debug("updateDeclCode {}{*}", .{ decl_name.fmt(&mod.intern_pool), decl }); @@ -1009,7 +1009,7 @@ fn updateTlv( const mod = pt.zcu; const gpa = mod.gpa; const decl = mod.declPtr(decl_index); - const decl_name = try decl.fullyQualifiedName(mod); + const decl_name = try decl.fullyQualifiedName(pt); log.debug("updateTlv {} ({*})", .{ decl_name.fmt(&mod.intern_pool), decl }); @@ -1286,7 +1286,7 @@ pub fn lowerUnnamedConst( } const unnamed_consts = gop.value_ptr; const decl = mod.declPtr(decl_index); - const decl_name = try decl.fullyQualifiedName(mod); + const decl_name = try decl.fullyQualifiedName(pt); const index = unnamed_consts.items.len; const name = try std.fmt.allocPrint(gpa, "__unnamed_{}_{d}", .{ decl_name.fmt(&mod.intern_pool), index }); defer gpa.free(name); @@ -1466,19 +1466,19 @@ pub fn updateExports( /// Must be called only after a successful call to `updateDecl`. 
pub fn updateDeclLineNumber( self: *ZigObject, - mod: *Module, + pt: Zcu.PerThread, decl_index: InternPool.DeclIndex, ) !void { const tracy = trace(@src()); defer tracy.end(); - const decl = mod.declPtr(decl_index); - const decl_name = try decl.fullyQualifiedName(mod); + const decl = pt.zcu.declPtr(decl_index); + const decl_name = try decl.fullyQualifiedName(pt); - log.debug("updateDeclLineNumber {}{*}", .{ decl_name.fmt(&mod.intern_pool), decl }); + log.debug("updateDeclLineNumber {}{*}", .{ decl_name.fmt(&pt.zcu.intern_pool), decl }); if (self.dwarf) |*dw| { - try dw.updateDeclLineNumber(mod, decl_index); + try dw.updateDeclLineNumber(pt.zcu, decl_index); } } diff --git a/src/link/MachO.zig b/src/link/MachO.zig index d0c78bc2c2..ff083d367c 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -3198,9 +3198,9 @@ pub fn updateDecl(self: *MachO, pt: Zcu.PerThread, decl_index: InternPool.DeclIn return self.getZigObject().?.updateDecl(self, pt, decl_index); } -pub fn updateDeclLineNumber(self: *MachO, module: *Module, decl_index: InternPool.DeclIndex) !void { +pub fn updateDeclLineNumber(self: *MachO, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !void { if (self.llvm_object) |_| return; - return self.getZigObject().?.updateDeclLineNumber(module, decl_index); + return self.getZigObject().?.updateDeclLineNumber(pt, decl_index); } pub fn updateExports( @@ -3230,7 +3230,7 @@ pub fn freeDecl(self: *MachO, decl_index: InternPool.DeclIndex) void { return self.getZigObject().?.freeDecl(decl_index); } -pub fn getDeclVAddr(self: *MachO, decl_index: InternPool.DeclIndex, reloc_info: link.File.RelocInfo) !u64 { +pub fn getDeclVAddr(self: *MachO, _: Zcu.PerThread, decl_index: InternPool.DeclIndex, reloc_info: link.File.RelocInfo) !u64 { assert(self.llvm_object == null); return self.getZigObject().?.getDeclVAddr(self, decl_index, reloc_info); } diff --git a/src/link/MachO/ZigObject.zig b/src/link/MachO/ZigObject.zig index ffe362038d..03e659c497 100644 --- a/src/link/MachO/ZigObject.zig +++ b/src/link/MachO/ZigObject.zig @@ -810,7 +810,7 @@ fn updateDeclCode( const gpa = macho_file.base.comp.gpa; const mod = pt.zcu; const decl = mod.declPtr(decl_index); - const decl_name = try decl.fullyQualifiedName(mod); + const decl_name = try decl.fullyQualifiedName(pt); log.debug("updateDeclCode {}{*}", .{ decl_name.fmt(&mod.intern_pool), decl }); @@ -893,13 +893,12 @@ fn updateTlv( sect_index: u8, code: []const u8, ) !void { - const mod = pt.zcu; - const decl = mod.declPtr(decl_index); - const decl_name = try decl.fullyQualifiedName(mod); + const decl = pt.zcu.declPtr(decl_index); + const decl_name = try decl.fullyQualifiedName(pt); - log.debug("updateTlv {} ({*})", .{ decl_name.fmt(&mod.intern_pool), decl }); + log.debug("updateTlv {} ({*})", .{ decl_name.fmt(&pt.zcu.intern_pool), decl }); - const decl_name_slice = decl_name.toSlice(&mod.intern_pool); + const decl_name_slice = decl_name.toSlice(&pt.zcu.intern_pool); const required_alignment = decl.getAlignment(pt); // 1. Lower TLV initializer @@ -1100,7 +1099,7 @@ pub fn lowerUnnamedConst( } const unnamed_consts = gop.value_ptr; const decl = mod.declPtr(decl_index); - const decl_name = try decl.fullyQualifiedName(mod); + const decl_name = try decl.fullyQualifiedName(pt); const index = unnamed_consts.items.len; const name = try std.fmt.allocPrint(gpa, "__unnamed_{}_{d}", .{ decl_name.fmt(&mod.intern_pool), index }); defer gpa.free(name); @@ -1363,9 +1362,9 @@ fn updateLazySymbol( } /// Must be called only after a successful call to `updateDecl`. 
-pub fn updateDeclLineNumber(self: *ZigObject, mod: *Module, decl_index: InternPool.DeclIndex) !void { +pub fn updateDeclLineNumber(self: *ZigObject, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !void { if (self.dwarf) |*dw| { - try dw.updateDeclLineNumber(mod, decl_index); + try dw.updateDeclLineNumber(pt.zcu, decl_index); } } diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig index 827c974180..cfc8435906 100644 --- a/src/link/Plan9.zig +++ b/src/link/Plan9.zig @@ -483,7 +483,7 @@ pub fn lowerUnnamedConst(self: *Plan9, pt: Zcu.PerThread, val: Value, decl_index } const unnamed_consts = gop.value_ptr; - const decl_name = try decl.fullyQualifiedName(mod); + const decl_name = try decl.fullyQualifiedName(pt); const index = unnamed_consts.items.len; // name is freed when the unnamed const is freed @@ -1496,22 +1496,22 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void { } /// Must be called only after a successful call to `updateDecl`. -pub fn updateDeclLineNumber(self: *Plan9, mod: *Zcu, decl_index: InternPool.DeclIndex) !void { +pub fn updateDeclLineNumber(self: *Plan9, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !void { _ = self; - _ = mod; + _ = pt; _ = decl_index; } pub fn getDeclVAddr( self: *Plan9, + pt: Zcu.PerThread, decl_index: InternPool.DeclIndex, reloc_info: link.File.RelocInfo, ) !u64 { - const mod = self.base.comp.module.?; - const ip = &mod.intern_pool; - const decl = mod.declPtr(decl_index); + const ip = &pt.zcu.intern_pool; + const decl = pt.zcu.declPtr(decl_index); log.debug("getDeclVAddr for {}", .{decl.name.fmt(ip)}); - if (decl.isExtern(mod)) { + if (decl.isExtern(pt.zcu)) { if (decl.name.eqlSlice("etext", ip)) { try self.addReloc(reloc_info.parent_atom_index, .{ .target = undefined, diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index 4e661e33e4..32af004132 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -1457,9 +1457,9 @@ pub fn updateDecl(wasm: *Wasm, pt: Zcu.PerThread, decl_index: InternPool.DeclInd try wasm.zigObjectPtr().?.updateDecl(wasm, pt, decl_index); } -pub fn updateDeclLineNumber(wasm: *Wasm, mod: *Zcu, decl_index: InternPool.DeclIndex) !void { +pub fn updateDeclLineNumber(wasm: *Wasm, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !void { if (wasm.llvm_object) |_| return; - try wasm.zigObjectPtr().?.updateDeclLineNumber(mod, decl_index); + try wasm.zigObjectPtr().?.updateDeclLineNumber(pt, decl_index); } /// From a given symbol location, returns its `wasm.GlobalType`. @@ -1521,10 +1521,11 @@ pub fn getGlobalSymbol(wasm: *Wasm, name: []const u8, lib_name: ?[]const u8) !Sy /// Returns the given pointer address pub fn getDeclVAddr( wasm: *Wasm, + pt: Zcu.PerThread, decl_index: InternPool.DeclIndex, reloc_info: link.File.RelocInfo, ) !u64 { - return wasm.zigObjectPtr().?.getDeclVAddr(wasm, decl_index, reloc_info); + return wasm.zigObjectPtr().?.getDeclVAddr(wasm, pt, decl_index, reloc_info); } pub fn lowerAnonDecl( @@ -4016,8 +4017,8 @@ pub fn getErrorTableSymbol(wasm_file: *Wasm, pt: Zcu.PerThread) !u32 { /// For a given `InternPool.DeclIndex` returns its corresponding `Atom.Index`. /// When the index was not found, a new `Atom` will be created, and its index will be returned. /// The newly created Atom is empty with default fields as specified by `Atom.empty`. 
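 /// (The `Zcu.PerThread` parameter is needed because creating the atom derives
 /// the symbol name from the Decl's fully qualified name, which may intern a
 /// new string.)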
-pub fn getOrCreateAtomForDecl(wasm_file: *Wasm, decl_index: InternPool.DeclIndex) !Atom.Index { - return wasm_file.zigObjectPtr().?.getOrCreateAtomForDecl(wasm_file, decl_index); +pub fn getOrCreateAtomForDecl(wasm_file: *Wasm, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !Atom.Index { + return wasm_file.zigObjectPtr().?.getOrCreateAtomForDecl(wasm_file, pt, decl_index); } /// Verifies all resolved symbols and checks whether itself needs to be marked alive, diff --git a/src/link/Wasm/ZigObject.zig b/src/link/Wasm/ZigObject.zig index a693902743..f95c8fc794 100644 --- a/src/link/Wasm/ZigObject.zig +++ b/src/link/Wasm/ZigObject.zig @@ -253,7 +253,7 @@ pub fn updateDecl( } const gpa = wasm_file.base.comp.gpa; - const atom_index = try zig_object.getOrCreateAtomForDecl(wasm_file, decl_index); + const atom_index = try zig_object.getOrCreateAtomForDecl(wasm_file, pt, decl_index); const atom = wasm_file.getAtomPtr(atom_index); atom.clear(); @@ -302,7 +302,7 @@ pub fn updateFunc( const func = pt.zcu.funcInfo(func_index); const decl_index = func.owner_decl; const decl = pt.zcu.declPtr(decl_index); - const atom_index = try zig_object.getOrCreateAtomForDecl(wasm_file, decl_index); + const atom_index = try zig_object.getOrCreateAtomForDecl(wasm_file, pt, decl_index); const atom = wasm_file.getAtomPtr(atom_index); atom.clear(); @@ -346,7 +346,7 @@ fn finishUpdateDecl( const atom_index = decl_info.atom; const atom = wasm_file.getAtomPtr(atom_index); const sym = zig_object.symbol(atom.sym_index); - const full_name = try decl.fullyQualifiedName(zcu); + const full_name = try decl.fullyQualifiedName(pt); sym.name = try zig_object.string_table.insert(gpa, full_name.toSlice(ip)); try atom.code.appendSlice(gpa, code); atom.size = @intCast(code.len); @@ -424,17 +424,21 @@ fn createDataSegment( /// For a given `InternPool.DeclIndex` returns its corresponding `Atom.Index`. /// When the index was not found, a new `Atom` will be created, and its index will be returned. /// The newly created Atom is empty with default fields as specified by `Atom.empty`. 
-pub fn getOrCreateAtomForDecl(zig_object: *ZigObject, wasm_file: *Wasm, decl_index: InternPool.DeclIndex) !Atom.Index { - const gpa = wasm_file.base.comp.gpa; +pub fn getOrCreateAtomForDecl( + zig_object: *ZigObject, + wasm_file: *Wasm, + pt: Zcu.PerThread, + decl_index: InternPool.DeclIndex, +) !Atom.Index { + const gpa = pt.zcu.gpa; const gop = try zig_object.decls_map.getOrPut(gpa, decl_index); if (!gop.found_existing) { const sym_index = try zig_object.allocateSymbol(gpa); gop.value_ptr.* = .{ .atom = try wasm_file.createAtom(sym_index, zig_object.index) }; - const mod = wasm_file.base.comp.module.?; - const decl = mod.declPtr(decl_index); - const full_name = try decl.fullyQualifiedName(mod); + const decl = pt.zcu.declPtr(decl_index); + const full_name = try decl.fullyQualifiedName(pt); const sym = zig_object.symbol(sym_index); - sym.name = try zig_object.string_table.insert(gpa, full_name.toSlice(&mod.intern_pool)); + sym.name = try zig_object.string_table.insert(gpa, full_name.toSlice(&pt.zcu.intern_pool)); } return gop.value_ptr.atom; } @@ -487,10 +491,10 @@ pub fn lowerUnnamedConst( std.debug.assert(val.typeOf(mod).zigTypeTag(mod) != .Fn); // cannot create local symbols for functions const decl = mod.declPtr(decl_index); - const parent_atom_index = try zig_object.getOrCreateAtomForDecl(wasm_file, decl_index); + const parent_atom_index = try zig_object.getOrCreateAtomForDecl(wasm_file, pt, decl_index); const parent_atom = wasm_file.getAtom(parent_atom_index); const local_index = parent_atom.locals.items.len; - const fqn = try decl.fullyQualifiedName(mod); + const fqn = try decl.fullyQualifiedName(pt); const name = try std.fmt.allocPrintZ(gpa, "__unnamed_{}_{d}", .{ fqn.fmt(&mod.intern_pool), local_index, }); @@ -775,22 +779,22 @@ pub fn getGlobalSymbol(zig_object: *ZigObject, gpa: std.mem.Allocator, name: []c pub fn getDeclVAddr( zig_object: *ZigObject, wasm_file: *Wasm, + pt: Zcu.PerThread, decl_index: InternPool.DeclIndex, reloc_info: link.File.RelocInfo, ) !u64 { const target = wasm_file.base.comp.root_mod.resolved_target.result; - const gpa = wasm_file.base.comp.gpa; - const mod = wasm_file.base.comp.module.?; - const decl = mod.declPtr(decl_index); + const gpa = pt.zcu.gpa; + const decl = pt.zcu.declPtr(decl_index); - const target_atom_index = try zig_object.getOrCreateAtomForDecl(wasm_file, decl_index); + const target_atom_index = try zig_object.getOrCreateAtomForDecl(wasm_file, pt, decl_index); const target_symbol_index = @intFromEnum(wasm_file.getAtom(target_atom_index).sym_index); std.debug.assert(reloc_info.parent_atom_index != 0); const atom_index = wasm_file.symbol_atom.get(.{ .file = zig_object.index, .index = @enumFromInt(reloc_info.parent_atom_index) }).?; const atom = wasm_file.getAtomPtr(atom_index); const is_wasm32 = target.cpu.arch == .wasm32; - if (decl.typeOf(mod).zigTypeTag(mod) == .Fn) { + if (decl.typeOf(pt.zcu).zigTypeTag(pt.zcu) == .Fn) { std.debug.assert(reloc_info.addend == 0); // addend not allowed for function relocations try atom.relocs.append(gpa, .{ .index = target_symbol_index, @@ -890,7 +894,7 @@ pub fn updateExports( }, }; const decl = mod.declPtr(decl_index); - const atom_index = try zig_object.getOrCreateAtomForDecl(wasm_file, decl_index); + const atom_index = try zig_object.getOrCreateAtomForDecl(wasm_file, pt, decl_index); const decl_info = zig_object.decls_map.getPtr(decl_index).?; const atom = wasm_file.getAtom(atom_index); const atom_sym = atom.symbolLoc().getSymbol(wasm_file).*; @@ -1116,13 +1120,17 @@ pub fn 
createDebugSectionForIndex(zig_object: *ZigObject, wasm_file: *Wasm, inde return atom_index; } -pub fn updateDeclLineNumber(zig_object: *ZigObject, mod: *Zcu, decl_index: InternPool.DeclIndex) !void { +pub fn updateDeclLineNumber( + zig_object: *ZigObject, + pt: Zcu.PerThread, + decl_index: InternPool.DeclIndex, +) !void { if (zig_object.dwarf) |*dw| { - const decl = mod.declPtr(decl_index); - const decl_name = try decl.fullyQualifiedName(mod); + const decl = pt.zcu.declPtr(decl_index); + const decl_name = try decl.fullyQualifiedName(pt); - log.debug("updateDeclLineNumber {}{*}", .{ decl_name.fmt(&mod.intern_pool), decl }); - try dw.updateDeclLineNumber(mod, decl_index); + log.debug("updateDeclLineNumber {}{*}", .{ decl_name.fmt(&pt.zcu.intern_pool), decl }); + try dw.updateDeclLineNumber(pt.zcu, decl_index); } } diff --git a/src/mutable_value.zig b/src/mutable_value.zig index 0ca2d1d317..63f198dfa7 100644 --- a/src/mutable_value.zig +++ b/src/mutable_value.zig @@ -71,7 +71,7 @@ pub const MutableValue = union(enum) { } }), .bytes => |b| try pt.intern(.{ .aggregate = .{ .ty = b.ty, - .storage = .{ .bytes = try pt.zcu.intern_pool.getOrPutString(pt.zcu.gpa, b.data, .maybe_embedded_nulls) }, + .storage = .{ .bytes = try pt.zcu.intern_pool.getOrPutString(pt.zcu.gpa, pt.tid, b.data, .maybe_embedded_nulls) }, } }), .aggregate => |a| { const elems = try arena.alloc(InternPool.Index, a.elems.len); -- cgit v1.2.3 From cda716ecc43929fd1c2c9679335b8b22f1b67d1a Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sat, 15 Jun 2024 16:18:41 -0400 Subject: InternPool: implement thread-safe hash map --- lib/std/Thread/Pool.zig | 4 + src/Compilation.zig | 4 +- src/InternPool.zig | 635 +++++++++++++++++++++++++++++++++--------------- src/Zcu.zig | 4 +- 4 files changed, 452 insertions(+), 195 deletions(-) (limited to 'src/Compilation.zig') diff --git a/lib/std/Thread/Pool.zig b/lib/std/Thread/Pool.zig index 03ca8ffc8e..9fb3c3483a 100644 --- a/lib/std/Thread/Pool.zig +++ b/lib/std/Thread/Pool.zig @@ -291,3 +291,7 @@ pub fn waitAndWork(pool: *Pool, wait_group: *WaitGroup) void { return; } } + +pub fn getIdCount(pool: *Pool) usize { + return 1 + pool.threads.len; +} diff --git a/src/Compilation.zig b/src/Compilation.zig index 1f4c425bc5..7e10febf0e 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -1397,7 +1397,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil .error_limit = error_limit, .llvm_object = null, }; - try zcu.init(); + try zcu.init(options.thread_pool.getIdCount()); break :blk zcu; } else blk: { if (options.emit_h != null) return error.NoZigModuleForCHeader; @@ -2156,7 +2156,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void { if (build_options.enable_debug_extensions and comp.verbose_generic_instances) { std.debug.print("generic instances for '{s}:0x{x}':\n", .{ comp.root_name, - @as(usize, @intFromPtr(zcu)), + @intFromPtr(zcu), }); zcu.intern_pool.dumpGenericInstances(gpa); } diff --git a/src/InternPool.zig b/src/InternPool.zig index 97fd35bf20..13fb9be24e 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -2,9 +2,10 @@ //! This data structure is self-contained, with the following exceptions: //! * Module.Namespace has a pointer to Module.File -/// Maps `Key` to `Index`. `Key` objects are not stored anywhere; they are -/// constructed lazily. 
-map: std.AutoArrayHashMapUnmanaged(void, void) = .{}, +local: []Local = &.{}, +shard_shift: std.math.Log2Int(usize) = 0, +shards: []Shard = &.{}, + items: std.MultiArrayList(Item) = .{}, extra: std.ArrayListUnmanaged(u32) = .{}, /// On 32-bit systems, this array is ignored and extra is used for everything. @@ -351,6 +352,115 @@ pub const DepEntry = extern struct { }; }; +const Local = struct { + aligned: void align(std.atomic.cache_line) = {}, + + /// node: Garbage.Node, + /// header: List.Header, + /// data: [capacity]u32, + /// tag: [capacity]Tag, + items: List, + + /// node: Garbage.Node, + /// header: List.Header, + /// extra: [capacity]u32, + extra: List, + + garbage: Garbage, + + const List = struct { + entries: [*]u32, + + const empty: List = .{ + .entries = @constCast(&[_]u32{ 0, 0 })[Header.fields_len..].ptr, + }; + + fn acquire(list: *const List) List { + return .{ .entries = @atomicLoad([*]u32, &list.entries, .acquire) }; + } + fn release(list: *List, new_list: List) void { + @atomicStore([*]u32, &list.entries, new_list.entries, .release); + } + + const Header = extern struct { + len: u32, + capacity: u32, + + const fields_len = @typeInfo(Header).Struct.fields.len; + }; + fn header(list: List) *Header { + return @ptrCast(list.entries - Header.fields_len); + } + }; + + const Garbage = std.SinglyLinkedList(struct { buf_len: usize }); + const garbage_align = @max(@alignOf(Garbage.Node), @alignOf(u32)); + + fn freeGarbage(garbage: *const Garbage.Node, gpa: Allocator) void { + gpa.free(@as([*]align(Local.garbage_align) const u8, @ptrCast(garbage))[0..garbage.data.buf_len]); + } +}; + +const Shard = struct { + aligned: void align(std.atomic.cache_line) = {}, + + mutate_mutex: std.Thread.Mutex.Recursive, + + /// node: Local.Garbage.Node, + /// header: Map.Header, + /// entries: [capacity]Map.Entry, + map: Map, + + const Map = struct { + entries: [*]u32, + + const empty: Map = .{ + .entries = @constCast(&[_]u32{ 0, 1, @intFromEnum(Index.none), 0 })[Header.fields_len..].ptr, + }; + + fn acquire(map: *const Map) Map { + return .{ .entries = @atomicLoad([*]u32, &map.entries, .acquire) }; + } + fn release(map: *Map, new_map: Map) void { + @atomicStore([*]u32, &map.entries, new_map.entries, .release); + } + + const Header = extern struct { + len: u32, + capacity: u32, + + const fields_len: u32 = @typeInfo(Header).Struct.fields.len; + + fn mask(head: *const Header) u32 { + assert(std.math.isPowerOfTwo(head.capacity)); + assert(std.math.isPowerOfTwo(Entry.fields_len)); + return (head.capacity - 1) * Entry.fields_len; + } + }; + fn header(map: Map) *Header { + return @ptrCast(map.entries - Header.fields_len); + } + + const Entry = extern struct { + index: Index, + hash: u32, + + const fields_len: u32 = @typeInfo(Entry).Struct.fields.len; + + fn acquire(entry: *const Entry) Index { + return @atomicLoad(Index, &entry.index, .acquire); + } + fn release(entry: *Entry, index: Index) void { + @atomicStore(Index, &entry.index, index, .release); + } + }; + fn at(map: Map, index: usize) *Entry { + assert(index % Entry.fields_len == 0); + return @ptrCast(&map.entries[index]); + } + }; +}; + const FieldMap = std.ArrayHashMapUnmanaged(void, void, std.array_hash_map.AutoContext(void), false); const builtin = @import("builtin"); @@ -369,20 +479,6 @@ const Zcu = @import("Zcu.zig"); const Module = Zcu; const Zir = std.zig.Zir; -const KeyAdapter = struct { - intern_pool: *const InternPool, - - pub fn eql(ctx: @This(), a: Key, b_void: void, b_map_index: usize) bool { - _ = b_void; - if 
(ctx.intern_pool.items.items(.tag)[b_map_index] == .removed) return false; - return ctx.intern_pool.indexToKey(@enumFromInt(b_map_index)).eql(a, ctx.intern_pool); - } - - pub fn hash(ctx: @This(), a: Key) u32 { - return a.hash32(ctx.intern_pool); - } -}; - /// An index into `maps` which might be `none`. pub const OptionalMapIndex = enum(u32) { none = std.math.maxInt(u32), @@ -4535,17 +4631,27 @@ pub const MemoizedCall = struct { result: Index, }; -pub fn init(ip: *InternPool, gpa: Allocator) !void { +pub fn init(ip: *InternPool, gpa: Allocator, total_threads: usize) !void { + errdefer ip.deinit(gpa); assert(ip.items.len == 0); + ip.local = try gpa.alloc(Local, total_threads); + @memset(ip.local, .{ + .items = Local.List.empty, + .extra = Local.List.empty, + .garbage = .{}, + }); + + ip.shard_shift = @intCast(std.math.log2_int_ceil(usize, total_threads)); + ip.shards = try gpa.alloc(Shard, @as(usize, 1) << ip.shard_shift); + @memset(ip.shards, .{ + .mutate_mutex = std.Thread.Mutex.Recursive.init, + .map = Shard.Map.empty, + }); + // Reserve string index 0 for an empty string. assert((try ip.getOrPutString(gpa, .main, "", .no_embedded_nulls)) == .empty); - // So that we can use `catch unreachable` below. - try ip.items.ensureUnusedCapacity(gpa, static_keys.len); - try ip.map.ensureUnusedCapacity(gpa, static_keys.len); - try ip.extra.ensureUnusedCapacity(gpa, static_keys.len); - // This inserts all the statically-known values into the intern pool in the // order expected. for (&static_keys, 0..) |key, key_index| switch (@as(Index, @enumFromInt(key_index))) { @@ -4574,12 +4680,9 @@ pub fn init(ip: *InternPool, gpa: Allocator) !void { assert(ip.indexToKey(ip.typeOf(cc_inline)).int_type.bits == @typeInfo(@typeInfo(std.builtin.CallingConvention).Enum.tag_type).Int.bits); } - - assert(ip.items.len == static_keys.len); } pub fn deinit(ip: *InternPool, gpa: Allocator) void { - ip.map.deinit(gpa); ip.items.deinit(gpa); ip.extra.deinit(gpa); ip.limbs.deinit(gpa); @@ -4611,6 +4714,16 @@ pub fn deinit(ip: *InternPool, gpa: Allocator) void { ip.files.deinit(gpa); + gpa.free(ip.shards); + for (ip.local) |*local| { + var next = local.garbage.first; + while (next) |cur| { + next = cur.next; + Local.freeGarbage(cur, gpa); + } + } + gpa.free(ip.local); + ip.* = undefined; } @@ -5239,10 +5352,133 @@ fn indexToKeyBigInt(ip: *const InternPool, limb_index: u32, positive: bool) Key } }; } +const GetOrPutKey = union(enum) { + existing: Index, + new: struct { + shard: *Shard, + map_index: u32, + }, + + fn set(gop: *GetOrPutKey, index: Index) Index { + switch (gop.*) { + .existing => unreachable, + .new => |info| { + info.shard.map.at(info.map_index).release(index); + info.shard.map.header().len += 1; + info.shard.mutate_mutex.unlock(); + }, + } + gop.* = .{ .existing = index }; + return index; + } + + fn assign(gop: *GetOrPutKey, new_gop: GetOrPutKey) void { + gop.deinit(); + gop.* = new_gop; + } + + fn deinit(gop: *GetOrPutKey) void { + switch (gop.*) { + .existing => {}, + .new => |info| info.shard.mutate_mutex.unlock(), + } + gop.* = undefined; + } +}; +fn getOrPutKey( + ip: *InternPool, + gpa: Allocator, + tid: Zcu.PerThread.Id, + key: Key, +) Allocator.Error!GetOrPutKey { + const full_hash = key.hash64(ip); + const hash: u32 = @truncate(full_hash >> 32); + const shard = &ip.shards[@intCast(full_hash & (ip.shards.len - 1))]; + var map = shard.map.acquire(); + var map_mask = map.header().mask(); + var map_index = hash; + while (true) : (map_index += Shard.Map.Entry.fields_len) { + map_index &= map_mask; + const 
entry = map.at(map_index); + const index = entry.acquire(); + if (index == .none) break; + if (entry.hash == hash and ip.indexToKey(index).eql(key, ip)) + return .{ .existing = index }; + } + shard.mutate_mutex.lock(); + errdefer shard.mutate_mutex.unlock(); + if (map.entries != shard.map.entries) { + map = shard.map; + map_mask = map.header().mask(); + map_index = hash; + } + while (true) : (map_index += Shard.Map.Entry.fields_len) { + map_index &= map_mask; + const entry = map.at(map_index); + const index = entry.index; + if (index == .none) break; + if (entry.hash == hash and ip.indexToKey(index).eql(key, ip)) { + defer shard.mutate_mutex.unlock(); + return .{ .existing = index }; + } + } + const map_header = map.header().*; + if (map_header.len >= map_header.capacity * 3 / 5) { + const new_map_capacity = map_header.capacity * 2; + const new_map_buf = try gpa.alignedAlloc( + u8, + Local.garbage_align, + @sizeOf(Local.Garbage.Node) + (Shard.Map.Header.fields_len + + new_map_capacity * Shard.Map.Entry.fields_len) * @sizeOf(u32), + ); + const new_node: *Local.Garbage.Node = @ptrCast(new_map_buf.ptr); + new_node.* = .{ .data = .{ .buf_len = new_map_buf.len } }; + ip.local[@intFromEnum(tid)].garbage.prepend(new_node); + const new_map_entries = std.mem.bytesAsSlice( + u32, + new_map_buf[@sizeOf(Local.Garbage.Node)..], + )[Shard.Map.Header.fields_len..]; + const new_map: Shard.Map = .{ .entries = new_map_entries.ptr }; + new_map.header().* = .{ + .len = map_header.len, + .capacity = new_map_capacity, + }; + @memset(new_map_entries, @intFromEnum(Index.none)); + const new_map_mask = new_map.header().mask(); + map_index = 0; + while (map_index < map_header.capacity * 2) : (map_index += Shard.Map.Entry.fields_len) { + const entry = map.at(map_index); + const index = entry.index; + if (index == .none) continue; + const item_hash = entry.hash; + var new_map_index = item_hash; + while (true) : (new_map_index += Shard.Map.Entry.fields_len) { + new_map_index &= new_map_mask; + const new_entry = new_map.at(new_map_index); + if (new_entry.index != .none) continue; + new_entry.* = .{ + .index = index, + .hash = item_hash, + }; + break; + } + } + map = new_map; + map_index = hash; + while (true) : (map_index += Shard.Map.Entry.fields_len) { + map_index &= new_map_mask; + if (map.at(map_index).index == .none) break; + } + shard.map.release(new_map); + } + map.at(map_index).hash = hash; + return .{ .new = .{ .shard = shard, .map_index = map_index } }; +} + pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) Allocator.Error!Index { - const adapter: KeyAdapter = .{ .intern_pool = ip }; - const gop = try ip.map.getOrPutAdapted(gpa, key, adapter); - if (gop.found_existing) return @enumFromInt(gop.index); + var gop = try ip.getOrPutKey(gpa, tid, key); + defer gop.deinit(); + if (gop == .existing) return gop.existing; try ip.items.ensureUnusedCapacity(gpa, 1); switch (key) { .int_type => |int_type| { @@ -5260,18 +5496,17 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All assert(ptr_type.sentinel == .none or ip.typeOf(ptr_type.sentinel) == ptr_type.child); if (ptr_type.flags.size == .Slice) { - _ = ip.map.pop(); var new_key = key; new_key.ptr_type.flags.size = .Many; const ptr_type_index = try ip.get(gpa, tid, new_key); - assert(!(try ip.map.getOrPutAdapted(gpa, key, adapter)).found_existing); + gop.assign(try ip.getOrPutKey(gpa, tid, key)); try ip.items.ensureUnusedCapacity(gpa, 1); ip.items.appendAssumeCapacity(.{ .tag = .type_slice, .data = 
@intFromEnum(ptr_type_index), }); - return @enumFromInt(ip.items.len - 1); + return gop.set(@enumFromInt(ip.items.len - 1)); } var ptr_type_adjusted = ptr_type; @@ -5295,7 +5530,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All .child = array_type.child, }), }); - return @enumFromInt(ip.items.len - 1); + return gop.set(@enumFromInt(ip.items.len - 1)); } } @@ -5442,11 +5677,10 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All }, .anon_decl => |anon_decl| if (ptrsHaveSameAlignment(ip, ptr.ty, ptr_type, anon_decl.orig_ty)) item: { if (ptr.ty != anon_decl.orig_ty) { - _ = ip.map.pop(); var new_key = key; new_key.ptr.base_addr.anon_decl.orig_ty = ptr.ty; - const new_gop = try ip.map.getOrPutAdapted(gpa, new_key, adapter); - if (new_gop.found_existing) return @enumFromInt(new_gop.index); + gop.assign(try ip.getOrPutKey(gpa, tid, new_key)); + if (gop == .existing) return gop.existing; } break :item .{ .tag = .ptr_anon_decl, @@ -5486,7 +5720,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All .tag = .ptr_int, .data = try ip.addExtra(gpa, PtrInt.init(ptr.ty, ptr.byte_offset)), }, - .arr_elem, .field => |base_index| item: { + .arr_elem, .field => |base_index| { const base_ptr_type = ip.indexToKey(ip.typeOf(base_index.base)).ptr_type; switch (ptr.base_addr) { .arr_elem => assert(base_ptr_type.flags.size == .Many), @@ -5516,21 +5750,21 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All }, else => unreachable, } - _ = ip.map.pop(); const index_index = try ip.get(gpa, tid, .{ .int = .{ .ty = .usize_type, .storage = .{ .u64 = base_index.index }, } }); - assert(!(try ip.map.getOrPutAdapted(gpa, key, adapter)).found_existing); + gop.assign(try ip.getOrPutKey(gpa, tid, key)); try ip.items.ensureUnusedCapacity(gpa, 1); - break :item .{ + ip.items.appendAssumeCapacity(.{ .tag = switch (ptr.base_addr) { .arr_elem => .ptr_elem, .field => .ptr_field, else => unreachable, }, .data = try ip.addExtra(gpa, PtrBaseIndex.init(ptr.ty, base_index.base, index_index, ptr.byte_offset)), - }; + }); + return gop.set(@enumFromInt(ip.items.len - 1)); }, }); }, @@ -5566,7 +5800,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All .lazy_ty = lazy_ty, }), }); - return @enumFromInt(ip.items.len - 1); + return gop.set(@enumFromInt(ip.items.len - 1)); }, } switch (int.ty) { @@ -5707,7 +5941,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All .value = casted, }), }); - return @enumFromInt(ip.items.len - 1); + return gop.set(@enumFromInt(ip.items.len - 1)); } else |_| {} const tag: Tag = if (big_int.positive) .int_positive else .int_negative; @@ -5722,7 +5956,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All .value = casted, }), }); - return @enumFromInt(ip.items.len - 1); + return gop.set(@enumFromInt(ip.items.len - 1)); } var buf: [2]Limb = undefined; @@ -5881,7 +6115,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All .tag = .only_possible_value, .data = @intFromEnum(aggregate.ty), }); - return @enumFromInt(ip.items.len - 1); + return gop.set(@enumFromInt(ip.items.len - 1)); } switch (ty_key) { @@ -5914,7 +6148,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All .tag = .only_possible_value, .data = @intFromEnum(aggregate.ty), }); - return @enumFromInt(ip.items.len - 1); + return gop.set(@enumFromInt(ip.items.len - 1)); }, else => 
{}, } @@ -5929,12 +6163,11 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All } const elem = switch (aggregate.storage) { .bytes => |bytes| elem: { - _ = ip.map.pop(); const elem = try ip.get(gpa, tid, .{ .int = .{ .ty = .u8_type, .storage = .{ .u64 = bytes.at(0, ip) }, } }); - assert(!(try ip.map.getOrPutAdapted(gpa, key, adapter)).found_existing); + gop.assign(try ip.getOrPutKey(gpa, tid, key)); try ip.items.ensureUnusedCapacity(gpa, 1); break :elem elem; }, @@ -5953,7 +6186,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All .elem_val = elem, }), }); - return @enumFromInt(ip.items.len - 1); + return gop.set(@enumFromInt(ip.items.len - 1)); } if (child == .u8_type) bytes: { @@ -5997,7 +6230,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All .bytes = string, }), }); - return @enumFromInt(ip.items.len - 1); + return gop.set(@enumFromInt(ip.items.len - 1)); } try ip.extra.ensureUnusedCapacity( @@ -6038,7 +6271,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All ip.extra.appendSliceAssumeCapacity(@ptrCast(memoized_call.arg_values)); }, } - return @enumFromInt(ip.items.len - 1); + return gop.set(@enumFromInt(ip.items.len - 1)); } pub const UnionTypeInit = struct { @@ -6076,11 +6309,10 @@ pub const UnionTypeInit = struct { pub fn getUnionType( ip: *InternPool, gpa: Allocator, - _: Zcu.PerThread.Id, + tid: Zcu.PerThread.Id, ini: UnionTypeInit, ) Allocator.Error!WipNamespaceType.Result { - const adapter: KeyAdapter = .{ .intern_pool = ip }; - const gop = try ip.map.getOrPutAdapted(gpa, Key{ .union_type = switch (ini.key) { + var gop = try ip.getOrPutKey(gpa, tid, .{ .union_type = switch (ini.key) { .declared => |d| .{ .declared = .{ .zir_index = d.zir_index, .captures = .{ .external = d.captures }, @@ -6089,9 +6321,9 @@ pub fn getUnionType( .zir_index = r.zir_index, .type_hash = r.type_hash, } }, - } }, adapter); - if (gop.found_existing) return .{ .existing = @enumFromInt(gop.index) }; - errdefer _ = ip.map.pop(); + } }); + defer gop.deinit(); + if (gop == .existing) return .{ .existing = gop.existing }; const align_elements_len = if (ini.flags.any_aligned_fields) (ini.fields_len + 3) / 4 else 0; const align_element: u32 = @bitCast([1]u8{@intFromEnum(Alignment.none)} ** 4); @@ -6167,7 +6399,7 @@ pub fn getUnionType( } return .{ .wip = .{ - .index = @enumFromInt(ip.items.len - 1), + .index = gop.set(@enumFromInt(ip.items.len - 1)), .decl_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeUnion, "decl").?, .namespace_extra_index = if (ini.has_namespace) extra_index + std.meta.fieldIndex(Tag.TypeUnion, "namespace").? 
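Every `get*` function in this commit is converted along the same lines: the old adapter-based `ip.map.getOrPutAdapted` call becomes an `ip.getOrPutKey` probe that either yields an existing `Index` or keeps the owning shard's `mutate_mutex` held until `gop.set` publishes the freshly appended item. A condensed sketch of the shape the converted functions share (`getSomething` and the `.something` tag are stand-in names, and the real payload encoding is elided):

    fn getSomething(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) Allocator.Error!Index {
        // Probe. On a miss, getOrPutKey returns while still holding the
        // shard's mutate_mutex, so no other thread can intern `key`.
        var gop = try ip.getOrPutKey(gpa, tid, key);
        defer gop.deinit(); // no-op after gop.set; unlocks the shard on error paths
        if (gop == .existing) return gop.existing;

        // Append the item while the reserved map slot is held.
        try ip.items.ensureUnusedCapacity(gpa, 1);
        ip.items.appendAssumeCapacity(.{ .tag = .something, .data = 0 });

        // Publish: release-store the index into the reserved entry, bump the
        // shard's len, unlock, and hand the index back.
        return gop.set(@enumFromInt(ip.items.len - 1));
    }

The `gop.assign(try ip.getOrPutKey(...))` calls in the slice and pointer cases above exist because a nested `ip.get` runs between probe and publish; the nested call may grow the same shard's map, so the stale reservation is dropped and a fresh slot is acquired for the original key.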
@@ -6225,11 +6457,10 @@ pub const StructTypeInit = struct { pub fn getStructType( ip: *InternPool, gpa: Allocator, - _: Zcu.PerThread.Id, + tid: Zcu.PerThread.Id, ini: StructTypeInit, ) Allocator.Error!WipNamespaceType.Result { - const adapter: KeyAdapter = .{ .intern_pool = ip }; - const key: Key = .{ .struct_type = switch (ini.key) { + var gop = try ip.getOrPutKey(gpa, tid, .{ .struct_type = switch (ini.key) { .declared => |d| .{ .declared = .{ .zir_index = d.zir_index, .captures = .{ .external = d.captures }, @@ -6238,10 +6469,9 @@ pub fn getStructType( .zir_index = r.zir_index, .type_hash = r.type_hash, } }, - } }; - const gop = try ip.map.getOrPutAdapted(gpa, key, adapter); - if (gop.found_existing) return .{ .existing = @enumFromInt(gop.index) }; - errdefer _ = ip.map.pop(); + } }); + defer gop.deinit(); + if (gop == .existing) return .{ .existing = gop.existing }; const names_map = try ip.addMap(gpa, ini.fields_len); errdefer _ = ip.maps.pop(); @@ -6298,7 +6528,7 @@ pub fn getStructType( ip.extra.appendNTimesAssumeCapacity(@intFromEnum(Index.none), ini.fields_len); } return .{ .wip = .{ - .index = @enumFromInt(ip.items.len - 1), + .index = gop.set(@enumFromInt(ip.items.len - 1)), .decl_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeStructPacked, "decl").?, .namespace_extra_index = if (ini.has_namespace) extra_index + std.meta.fieldIndex(Tag.TypeStructPacked, "namespace").? @@ -6387,7 +6617,7 @@ pub fn getStructType( } ip.extra.appendNTimesAssumeCapacity(std.math.maxInt(u32), ini.fields_len); return .{ .wip = .{ - .index = @enumFromInt(ip.items.len - 1), + .index = gop.set(@enumFromInt(ip.items.len - 1)), .decl_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeStruct, "decl").?, .namespace_extra_index = namespace_extra_index, } }; @@ -6404,7 +6634,7 @@ pub const AnonStructTypeInit = struct { pub fn getAnonStructType( ip: *InternPool, gpa: Allocator, - _: Zcu.PerThread.Id, + tid: Zcu.PerThread.Id, ini: AnonStructTypeInit, ) Allocator.Error!Index { assert(ini.types.len == ini.values.len); @@ -6424,25 +6654,26 @@ pub fn getAnonStructType( }); ip.extra.appendSliceAssumeCapacity(@ptrCast(ini.types)); ip.extra.appendSliceAssumeCapacity(@ptrCast(ini.values)); + errdefer ip.extra.items.len = prev_extra_len; - const adapter: KeyAdapter = .{ .intern_pool = ip }; - const key: Key = .{ + var gop = try ip.getOrPutKey(gpa, tid, .{ .anon_struct_type = if (ini.names.len == 0) extraTypeTupleAnon(ip, extra_index) else k: { assert(ini.names.len == ini.types.len); ip.extra.appendSliceAssumeCapacity(@ptrCast(ini.names)); break :k extraTypeStructAnon(ip, extra_index); }, - }; - const gop = try ip.map.getOrPutAdapted(gpa, key, adapter); - if (gop.found_existing) { + }); + defer gop.deinit(); + if (gop == .existing) { ip.extra.items.len = prev_extra_len; - return @enumFromInt(gop.index); + return gop.existing; } + ip.items.appendAssumeCapacity(.{ .tag = if (ini.names.len == 0) .type_tuple_anon else .type_struct_anon, .data = extra_index, }); - return @enumFromInt(ip.items.len - 1); + return gop.set(@enumFromInt(ip.items.len - 1)); } /// This is equivalent to `Key.FuncType` but adjusted to have a slice for `param_types`. @@ -6463,7 +6694,7 @@ pub const GetFuncTypeKey = struct { pub fn getFuncType( ip: *InternPool, gpa: Allocator, - _: Zcu.PerThread.Id, + tid: Zcu.PerThread.Id, key: GetFuncTypeKey, ) Allocator.Error!Index { // Validate input parameters. 
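`getAnonStructType` above and `getFuncType` (whose body follows) add one wrinkle to that pattern: their keys are decoded views into `ip.extra` (`extraTypeTupleAnon`, `extraFuncType`, and friends), so the payload must be written speculatively before the probe and rolled back when the key turns out to exist already. Roughly, as a sketch of the function body with `payload` standing in for the real trailing data:

    const prev_extra_len = ip.extra.items.len;
    const extra_index = try ip.addExtra(gpa, payload); // speculative encode
    errdefer ip.extra.items.len = prev_extra_len;

    // The key is a view straight into the extra data just written.
    var gop = try ip.getOrPutKey(gpa, tid, .{ .func_type = extraFuncType(ip, extra_index) });
    defer gop.deinit();
    if (gop == .existing) {
        ip.extra.items.len = prev_extra_len; // discard the speculative payload
        return gop.existing;
    }
    ip.items.appendAssumeCapacity(.{ .tag = .type_function, .data = extra_index });
    return gop.set(@enumFromInt(ip.items.len - 1));

This is also why the commit introduces `errdefer ip.extra.items.len = prev_extra_len;` next to each `appendSliceAssumeCapacity`: an allocation failure inside `getOrPutKey` must not leak the speculative extra data.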
@@ -6501,33 +6732,33 @@ pub fn getFuncType( if (key.comptime_bits != 0) ip.extra.appendAssumeCapacity(key.comptime_bits); if (key.noalias_bits != 0) ip.extra.appendAssumeCapacity(key.noalias_bits); ip.extra.appendSliceAssumeCapacity(@ptrCast(key.param_types)); + errdefer ip.extra.items.len = prev_extra_len; - const adapter: KeyAdapter = .{ .intern_pool = ip }; - const gop = try ip.map.getOrPutAdapted(gpa, Key{ + var gop = try ip.getOrPutKey(gpa, tid, .{ .func_type = extraFuncType(ip, func_type_extra_index), - }, adapter); - if (gop.found_existing) { + }); + defer gop.deinit(); + if (gop == .existing) { ip.extra.items.len = prev_extra_len; - return @enumFromInt(gop.index); + return gop.existing; } ip.items.appendAssumeCapacity(.{ .tag = .type_function, .data = func_type_extra_index, }); - return @enumFromInt(ip.items.len - 1); + return gop.set(@enumFromInt(ip.items.len - 1)); } pub fn getExternFunc( ip: *InternPool, gpa: Allocator, - _: Zcu.PerThread.Id, + tid: Zcu.PerThread.Id, key: Key.ExternFunc, ) Allocator.Error!Index { - const adapter: KeyAdapter = .{ .intern_pool = ip }; - const gop = try ip.map.getOrPutAdapted(gpa, Key{ .extern_func = key }, adapter); - if (gop.found_existing) return @enumFromInt(gop.index); - errdefer _ = ip.map.pop(); + var gop = try ip.getOrPutKey(gpa, tid, .{ .extern_func = key }); + defer gop.deinit(); + if (gop == .existing) return gop.existing; const prev_extra_len = ip.extra.items.len; const extra_index = try ip.addExtra(gpa, @as(Tag.ExternFunc, key)); errdefer ip.extra.items.len = prev_extra_len; @@ -6536,7 +6767,7 @@ pub fn getExternFunc( .data = extra_index, }); errdefer ip.items.len -= 1; - return @enumFromInt(ip.items.len - 1); + return gop.set(@enumFromInt(ip.items.len - 1)); } pub const GetFuncDeclKey = struct { @@ -6554,7 +6785,7 @@ pub const GetFuncDeclKey = struct { pub fn getFuncDecl( ip: *InternPool, gpa: Allocator, - _: Zcu.PerThread.Id, + tid: Zcu.PerThread.Id, key: GetFuncDeclKey, ) Allocator.Error!Index { // The strategy here is to add the function type unconditionally, then to @@ -6564,7 +6795,6 @@ pub fn getFuncDecl( try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.FuncDecl).Struct.fields.len); try ip.items.ensureUnusedCapacity(gpa, 1); - try ip.map.ensureUnusedCapacity(gpa, 1); const func_decl_extra_index = ip.addExtraAssumeCapacity(Tag.FuncDecl{ .analysis = .{ @@ -6583,22 +6813,22 @@ pub fn getFuncDecl( .lbrace_column = key.lbrace_column, .rbrace_column = key.rbrace_column, }); + errdefer ip.extra.items.len = prev_extra_len; - const adapter: KeyAdapter = .{ .intern_pool = ip }; - const gop = ip.map.getOrPutAssumeCapacityAdapted(Key{ + var gop = try ip.getOrPutKey(gpa, tid, .{ .func = extraFuncDecl(ip, func_decl_extra_index), - }, adapter); - - if (gop.found_existing) { + }); + defer gop.deinit(); + if (gop == .existing) { ip.extra.items.len = prev_extra_len; - return @enumFromInt(gop.index); + return gop.existing; } ip.items.appendAssumeCapacity(.{ .tag = .func_decl, .data = func_decl_extra_index, }); - return @enumFromInt(ip.items.len - 1); + return gop.set(@enumFromInt(ip.items.len - 1)); } pub const GetFuncDeclIesKey = struct { @@ -6626,7 +6856,7 @@ pub const GetFuncDeclIesKey = struct { pub fn getFuncDeclIes( ip: *InternPool, gpa: Allocator, - _: Zcu.PerThread.Id, + tid: Zcu.PerThread.Id, key: GetFuncDeclIesKey, ) Allocator.Error!Index { // Validate input parameters. 
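`getFuncDeclIes`, converted in the hunks that follow, is the most involved case: it appends four items at consecutive indices, and each must be published under its own key so that a concurrent thread interning, say, just the error-union return type can find it. The publish sequence from the new code, annotated with the item layout it relies on:

    const index = gop.set(@enumFromInt(ip.items.len - 4)); // N:     func_decl (returned to the caller)
    _ = eu_gop.set(@enumFromInt(@intFromEnum(index) + 1)); // N + 1: error union of the IES and the return type
    _ = ies_gop.set(@enumFromInt(@intFromEnum(index) + 2)); // N + 2: inferred error set, keyed by the func at N
    _ = ty_gop.set(@enumFromInt(@intFromEnum(index) + 3)); // N + 3: the function type itself

All four reservations are taken before any of them is published, so another thread looking up one of the companion keys blocks on that shard's mutex until the corresponding `set` runs, and since the base `gop.set` happens first, the companion entries always point at an already published function.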
@@ -6639,7 +6869,6 @@ pub fn getFuncDeclIes( const prev_extra_len = ip.extra.items.len; const params_len: u32 = @intCast(key.param_types.len); - try ip.map.ensureUnusedCapacity(gpa, 4); try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.FuncDecl).Struct.fields.len + 1 + // inferred_error_set @typeInfo(Tag.ErrorUnionType).Struct.fields.len + @@ -6704,40 +6933,51 @@ pub fn getFuncDeclIes( if (key.comptime_bits != 0) ip.extra.appendAssumeCapacity(key.comptime_bits); if (key.noalias_bits != 0) ip.extra.appendAssumeCapacity(key.noalias_bits); ip.extra.appendSliceAssumeCapacity(@ptrCast(key.param_types)); + errdefer { + ip.items.len -= 4; + ip.extra.items.len = prev_extra_len; + } ip.items.appendAssumeCapacity(.{ .tag = .type_function, .data = func_type_extra_index, }); - const adapter: KeyAdapter = .{ .intern_pool = ip }; - const gop = ip.map.getOrPutAssumeCapacityAdapted(Key{ + var gop = try ip.getOrPutKey(gpa, tid, .{ .func = extraFuncDecl(ip, func_decl_extra_index), - }, adapter); - if (!gop.found_existing) { - assert(!ip.map.getOrPutAssumeCapacityAdapted(Key{ .error_union_type = .{ - .error_set_type = @enumFromInt(ip.items.len - 2), - .payload_type = key.bare_return_type, - } }, adapter).found_existing); - assert(!ip.map.getOrPutAssumeCapacityAdapted(Key{ - .inferred_error_set_type = @enumFromInt(ip.items.len - 4), - }, adapter).found_existing); - assert(!ip.map.getOrPutAssumeCapacityAdapted(Key{ - .func_type = extraFuncType(ip, func_type_extra_index), - }, adapter).found_existing); - return @enumFromInt(ip.items.len - 4); - } - - // An existing function type was found; undo the additions to our two arrays. - ip.items.len -= 4; - ip.extra.items.len = prev_extra_len; - return @enumFromInt(gop.index); + }); + defer gop.deinit(); + if (gop == .existing) { + // An existing function type was found; undo the additions to our two arrays. 
+ ip.items.len -= 4; + ip.extra.items.len = prev_extra_len; + return gop.existing; + } + + var eu_gop = try ip.getOrPutKey(gpa, tid, .{ .error_union_type = .{ + .error_set_type = @enumFromInt(ip.items.len - 2), + .payload_type = key.bare_return_type, + } }); + defer eu_gop.deinit(); + var ies_gop = try ip.getOrPutKey(gpa, tid, .{ + .inferred_error_set_type = @enumFromInt(ip.items.len - 4), + }); + defer ies_gop.deinit(); + var ty_gop = try ip.getOrPutKey(gpa, tid, .{ + .func_type = extraFuncType(ip, func_type_extra_index), + }); + defer ty_gop.deinit(); + const index = gop.set(@enumFromInt(ip.items.len - 4)); + _ = eu_gop.set(@enumFromInt(@intFromEnum(index) + 1)); + _ = ies_gop.set(@enumFromInt(@intFromEnum(index) + 2)); + _ = ty_gop.set(@enumFromInt(@intFromEnum(index) + 3)); + return index; } pub fn getErrorSetType( ip: *InternPool, gpa: Allocator, - _: Zcu.PerThread.Id, + tid: Zcu.PerThread.Id, names: []const NullTerminatedString, ) Allocator.Error!Index { assert(std.sort.isSorted(NullTerminatedString, names, {}, NullTerminatedString.indexLessThan)); @@ -6757,16 +6997,15 @@ pub fn getErrorSetType( .names_map = predicted_names_map, }); ip.extra.appendSliceAssumeCapacity(@ptrCast(names)); + errdefer ip.extra.items.len = prev_extra_len; - const adapter: KeyAdapter = .{ .intern_pool = ip }; - const gop = try ip.map.getOrPutAdapted(gpa, Key{ + var gop = try ip.getOrPutKey(gpa, tid, .{ .error_set_type = extraErrorSet(ip, error_set_extra_index), - }, adapter); - errdefer _ = ip.map.pop(); - - if (gop.found_existing) { + }); + defer gop.deinit(); + if (gop == .existing) { ip.extra.items.len = prev_extra_len; - return @enumFromInt(gop.index); + return gop.existing; } try ip.items.append(gpa, .{ @@ -6781,7 +7020,7 @@ pub fn getErrorSetType( addStringsToMap(ip, names_map, names); - return @enumFromInt(ip.items.len - 1); + return gop.set(@enumFromInt(ip.items.len - 1)); } pub const GetFuncInstanceKey = struct { @@ -6845,14 +7084,13 @@ pub fn getFuncInstance( }); ip.extra.appendSliceAssumeCapacity(@ptrCast(arg.comptime_args)); - const gop = try ip.map.getOrPutAdapted(gpa, Key{ + var gop = try ip.getOrPutKey(gpa, tid, .{ .func = extraFuncInstance(ip, func_extra_index), - }, KeyAdapter{ .intern_pool = ip }); - errdefer _ = ip.map.pop(); - - if (gop.found_existing) { + }); + defer gop.deinit(); + if (gop == .existing) { ip.extra.items.len = prev_extra_len; - return @enumFromInt(gop.index); + return gop.existing; } const func_index: Index = @enumFromInt(ip.items.len); @@ -6863,7 +7101,7 @@ pub fn getFuncInstance( }); errdefer ip.items.len -= 1; - return finishFuncInstance( + return gop.set(try finishFuncInstance( ip, gpa, tid, @@ -6872,7 +7110,7 @@ pub fn getFuncInstance( func_extra_index, arg.alignment, arg.section, - ); + )); } /// This function exists separately than `getFuncInstance` because it needs to @@ -6897,7 +7135,6 @@ pub fn getFuncInstanceIes( const prev_extra_len = ip.extra.items.len; const params_len: u32 = @intCast(arg.param_types.len); - try ip.map.ensureUnusedCapacity(gpa, 4); try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.FuncInstance).Struct.fields.len + 1 + // inferred_error_set arg.comptime_args.len + @@ -6970,30 +7207,37 @@ pub fn getFuncInstanceIes( .tag = .type_function, .data = func_type_extra_index, }); + errdefer { + ip.items.len -= 4; + ip.extra.items.len = prev_extra_len; + } - const adapter: KeyAdapter = .{ .intern_pool = ip }; - const gop = ip.map.getOrPutAssumeCapacityAdapted(Key{ + var gop = try ip.getOrPutKey(gpa, tid, .{ .func = extraFuncInstance(ip, 
func_extra_index), - }, adapter); - if (gop.found_existing) { + }); + defer gop.deinit(); + if (gop == .existing) { // Hot path: undo the additions to our two arrays. ip.items.len -= 4; ip.extra.items.len = prev_extra_len; - return @enumFromInt(gop.index); + return gop.existing; } // Synchronize the map with items. - assert(!ip.map.getOrPutAssumeCapacityAdapted(Key{ .error_union_type = .{ + var eu_gop = try ip.getOrPutKey(gpa, tid, .{ .error_union_type = .{ .error_set_type = error_set_type, .payload_type = arg.bare_return_type, - } }, adapter).found_existing); - assert(!ip.map.getOrPutAssumeCapacityAdapted(Key{ + } }); + defer eu_gop.deinit(); + var ies_gop = try ip.getOrPutKey(gpa, tid, .{ .inferred_error_set_type = func_index, - }, adapter).found_existing); - assert(!ip.map.getOrPutAssumeCapacityAdapted(Key{ + }); + defer ies_gop.deinit(); + var ty_gop = try ip.getOrPutKey(gpa, tid, .{ .func_type = extraFuncType(ip, func_type_extra_index), - }, adapter).found_existing); - return finishFuncInstance( + }); + defer ty_gop.deinit(); + const index = gop.set(try finishFuncInstance( ip, gpa, tid, @@ -7002,7 +7246,11 @@ pub fn getFuncInstanceIes( func_extra_index, arg.alignment, arg.section, - ); + )); + _ = eu_gop.set(@enumFromInt(@intFromEnum(index) + 1)); + _ = ies_gop.set(@enumFromInt(@intFromEnum(index) + 2)); + _ = ty_gop.set(@enumFromInt(@intFromEnum(index) + 3)); + return index; } fn finishFuncInstance( @@ -7135,11 +7383,10 @@ pub const WipEnumType = struct { pub fn getEnumType( ip: *InternPool, gpa: Allocator, - _: Zcu.PerThread.Id, + tid: Zcu.PerThread.Id, ini: EnumTypeInit, ) Allocator.Error!WipEnumType.Result { - const adapter: KeyAdapter = .{ .intern_pool = ip }; - const gop = try ip.map.getOrPutAdapted(gpa, Key{ .enum_type = switch (ini.key) { + var gop = try ip.getOrPutKey(gpa, tid, .{ .enum_type = switch (ini.key) { .declared => |d| .{ .declared = .{ .zir_index = d.zir_index, .captures = .{ .external = d.captures }, @@ -7148,10 +7395,9 @@ pub fn getEnumType( .zir_index = r.zir_index, .type_hash = r.type_hash, } }, - } }, adapter); - if (gop.found_existing) return .{ .existing = @enumFromInt(gop.index) }; - assert(gop.index == ip.items.len); - errdefer _ = ip.map.pop(); + } }); + defer gop.deinit(); + if (gop == .existing) return .{ .existing = gop.existing }; try ip.items.ensureUnusedCapacity(gpa, 1); @@ -7196,7 +7442,7 @@ pub fn getEnumType( const names_start = ip.extra.items.len; ip.extra.appendNTimesAssumeCapacity(undefined, ini.fields_len); return .{ .wip = .{ - .index = @enumFromInt(gop.index), + .index = gop.set(@enumFromInt(ip.items.len - 1)), .tag_ty_index = extra_index + std.meta.fieldIndex(EnumAuto, "int_tag_type").?, .decl_index = extra_index + std.meta.fieldIndex(EnumAuto, "decl").?, .namespace_index = if (ini.has_namespace) extra_index + std.meta.fieldIndex(EnumAuto, "namespace").? else null, @@ -7260,7 +7506,7 @@ pub fn getEnumType( ip.extra.appendNTimesAssumeCapacity(undefined, ini.fields_len); } return .{ .wip = .{ - .index = @enumFromInt(gop.index), + .index = gop.set(@enumFromInt(ip.items.len - 1)), .tag_ty_index = extra_index + std.meta.fieldIndex(EnumAuto, "int_tag_type").?, .decl_index = extra_index + std.meta.fieldIndex(EnumAuto, "decl").?, .namespace_index = if (ini.has_namespace) extra_index + std.meta.fieldIndex(EnumAuto, "namespace").? 
else null, @@ -7288,14 +7534,13 @@ const GeneratedTagEnumTypeInit = struct { pub fn getGeneratedTagEnumType( ip: *InternPool, gpa: Allocator, - _: Zcu.PerThread.Id, + tid: Zcu.PerThread.Id, ini: GeneratedTagEnumTypeInit, ) Allocator.Error!Index { assert(ip.isUnion(ini.owner_union_ty)); assert(ip.isIntegerType(ini.tag_ty)); for (ini.values) |val| assert(ip.typeOf(val) == ini.tag_ty); - try ip.map.ensureUnusedCapacity(gpa, 1); try ip.items.ensureUnusedCapacity(gpa, 1); const names_map = try ip.addMap(gpa, ini.names.len); @@ -7304,6 +7549,7 @@ pub fn getGeneratedTagEnumType( const fields_len: u32 = @intCast(ini.names.len); + const prev_extra_len = ip.extra.items.len; switch (ini.tag_mode) { .auto => { try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(EnumAuto).Struct.fields.len + @@ -7360,17 +7606,17 @@ pub fn getGeneratedTagEnumType( ip.extra.appendSliceAssumeCapacity(@ptrCast(ini.values)); }, } - // Same as above - errdefer @compileError("error path leaks values_map and extra data"); + errdefer ip.extra.items.len = prev_extra_len; + errdefer switch (ini.tag_mode) { + .auto => {}, + .explicit, .nonexhaustive => _ = if (ini.values.len != 0) ip.maps.pop(), + }; - // Capacity for this was ensured earlier - const adapter: KeyAdapter = .{ .intern_pool = ip }; - const gop = ip.map.getOrPutAssumeCapacityAdapted(Key{ .enum_type = .{ + var gop = try ip.getOrPutKey(gpa, tid, .{ .enum_type = .{ .generated_tag = .{ .union_type = ini.owner_union_ty }, - } }, adapter); - assert(!gop.found_existing); - assert(gop.index == ip.items.len - 1); - return @enumFromInt(gop.index); + } }); + defer gop.deinit(); + return gop.set(@enumFromInt(ip.items.len - 1)); } pub const OpaqueTypeInit = struct { @@ -7390,11 +7636,10 @@ pub const OpaqueTypeInit = struct { pub fn getOpaqueType( ip: *InternPool, gpa: Allocator, - _: Zcu.PerThread.Id, + tid: Zcu.PerThread.Id, ini: OpaqueTypeInit, ) Allocator.Error!WipNamespaceType.Result { - const adapter: KeyAdapter = .{ .intern_pool = ip }; - const gop = try ip.map.getOrPutAdapted(gpa, Key{ .opaque_type = switch (ini.key) { + var gop = try ip.getOrPutKey(gpa, tid, .{ .opaque_type = switch (ini.key) { .declared => |d| .{ .declared = .{ .zir_index = d.zir_index, .captures = .{ .external = d.captures }, @@ -7403,9 +7648,9 @@ pub fn getOpaqueType( .zir_index = r.zir_index, .type_hash = 0, } }, - } }, adapter); - if (gop.found_existing) return .{ .existing = @enumFromInt(gop.index) }; - errdefer _ = ip.map.pop(); + } }); + defer gop.deinit(); + if (gop == .existing) return .{ .existing = gop.existing }; try ip.items.ensureUnusedCapacity(gpa, 1); try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.TypeOpaque).Struct.fields.len + switch (ini.key) { .declared => |d| d.captures.len, @@ -7431,7 +7676,7 @@ pub fn getOpaqueType( .reified => {}, } return .{ .wip = .{ - .index = @enumFromInt(gop.index), + .index = gop.set(@enumFromInt(ip.items.len - 1)), .decl_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeOpaque, "decl").?, .namespace_extra_index = if (ini.has_namespace) extra_index + std.meta.fieldIndex(Tag.TypeOpaque, "namespace").? 
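The next hunk turns `getIfExists` into a pure lock-free reader: writers fill in an entry's hash first and release-store the index last, so a reader that acquire-loads a non-`none` index is guaranteed to see the matching hash and a fully interned item, with no mutex involved. The probe arithmetic is compact enough to be easy to misread; a self-contained example of the stride and wrap-around, assuming a capacity of 8 entries and the two-u32 `Entry` layout:

    const std = @import("std");

    test "probe stride and wrap-around" {
        const mask: u32 = (8 - 1) * 2; // (capacity - 1) * Entry.fields_len = 0b1110
        var map_index: u32 = 0x2c & mask; // 12: start slot derived from the probe hash
        map_index = (map_index + 2) & mask; // 14: advance one entry (two u32s)
        map_index = (map_index + 2) & mask; // 0: wrapped back to the first entry
        try std.testing.expectEqual(@as(u32, 0), map_index);
    }

Masking with `(capacity - 1) * Entry.fields_len` both wraps the index and keeps it entry-aligned, which is exactly what `Map.at` asserts.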
@@ -7441,9 +7686,19 @@ pub fn getOpaqueType( } pub fn getIfExists(ip: *const InternPool, key: Key) ?Index { - const adapter: KeyAdapter = .{ .intern_pool = ip }; - const index = ip.map.getIndexAdapted(key, adapter) orelse return null; - return @enumFromInt(index); + const full_hash = key.hash64(ip); + const hash: u32 = @truncate(full_hash >> 32); + const shard = &ip.shards[@intCast(full_hash & (ip.shards.len - 1))]; + const map = shard.map.acquire(); + const map_mask = map.header().mask(); + var map_index = hash; + while (true) : (map_index += Shard.Map.Entry.fields_len) { + map_index &= map_mask; + const entry = map.at(map_index); + const index = entry.acquire(); + if (index == .none) return null; + if (entry.hash == hash and ip.indexToKey(index).eql(key, ip)) return index; + } } pub fn getAssumeExists(ip: *const InternPool, key: Key) Index { @@ -7506,7 +7761,6 @@ pub fn remove(ip: *InternPool, index: Index) void { if (@intFromEnum(index) == ip.items.len - 1) { // Happy case - we can just drop the item without affecting any other indices. ip.items.len -= 1; - _ = ip.map.pop(); } else { // We must preserve the item so that indices following it remain valid. // Thus, we will rewrite the tag to `removed`, leaking the item until @@ -8133,35 +8387,34 @@ fn getCoercedFuncInstance( fn getCoercedFunc( ip: *InternPool, gpa: Allocator, - _: Zcu.PerThread.Id, + tid: Zcu.PerThread.Id, func: Index, ty: Index, ) Allocator.Error!Index { const prev_extra_len = ip.extra.items.len; try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.FuncCoerced).Struct.fields.len); try ip.items.ensureUnusedCapacity(gpa, 1); - try ip.map.ensureUnusedCapacity(gpa, 1); const extra_index = ip.addExtraAssumeCapacity(Tag.FuncCoerced{ .ty = ty, .func = func, }); + errdefer ip.extra.items.len = prev_extra_len; - const adapter: KeyAdapter = .{ .intern_pool = ip }; - const gop = ip.map.getOrPutAssumeCapacityAdapted(Key{ + var gop = try ip.getOrPutKey(gpa, tid, .{ .func = extraFuncCoerced(ip, extra_index), - }, adapter); - - if (gop.found_existing) { + }); + defer gop.deinit(); + if (gop == .existing) { ip.extra.items.len = prev_extra_len; - return @enumFromInt(gop.index); + return gop.existing; } ip.items.appendAssumeCapacity(.{ .tag = .func_coerced, .data = extra_index, }); - return @enumFromInt(ip.items.len - 1); + return gop.set(@enumFromInt(ip.items.len - 1)); } /// Asserts `val` has an integer type. 
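One detail shared by this commit's typed map and the string map introduced in the next commit: the 64-bit key hash is split so that shard selection and in-shard probing use independent bits. The low bits pick a shard (the shard count is the thread count rounded up to a power of two), while the high 32 bits seed the probe sequence. A worked example under those rules (the hash value is an arbitrary stand-in for `Key.hash64`):

    const std = @import("std");

    test "hash split example" {
        const total_threads: usize = 3;
        // Shard count: thread count rounded up to a power of two.
        const shift: std.math.Log2Int(usize) = @intCast(std.math.log2_int_ceil(usize, total_threads));
        const shard_count = @as(usize, 1) << shift; // 4
        const full_hash: u64 = 0xDEAD_BEEF_0000_5677;
        const shard_index: usize = @intCast(full_hash & (shard_count - 1)); // low bits: 3
        const probe_hash: u32 = @truncate(full_hash >> 32); // high half: 0xDEAD_BEEF
        try std.testing.expectEqual(@as(usize, 3), shard_index);
        try std.testing.expectEqual(@as(u32, 0xDEAD_BEEF), probe_hash);
    }

Keeping the two uses on disjoint bits means that keys which collide into the same shard still start their probes at well-distributed slots.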
diff --git a/src/Zcu.zig b/src/Zcu.zig index c4ebc6a36b..32c9045910 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -2394,9 +2394,9 @@ pub const CompileError = error{ ComptimeBreak, }; -pub fn init(mod: *Module) !void { +pub fn init(mod: *Module, thread_count: usize) !void { const gpa = mod.gpa; - try mod.intern_pool.init(gpa); + try mod.intern_pool.init(gpa, thread_count); try mod.global_error_set.put(gpa, .empty, {}); } -- cgit v1.2.3 From 8293ff94cf2798a2678b91019979472d34273bdb Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Wed, 3 Jul 2024 22:37:09 -0400 Subject: InternPool: implement and use thread-safe list for strings --- lib/std/multi_array_list.zig | 2 +- src/Compilation.zig | 6 +- src/InternPool.zig | 460 +++++++++++++++++++++++++++++++------------ src/Value.zig | 17 +- src/Zcu.zig | 41 ++-- src/Zcu/PerThread.zig | 11 +- 6 files changed, 380 insertions(+), 157 deletions(-) (limited to 'src/Compilation.zig') diff --git a/lib/std/multi_array_list.zig b/lib/std/multi_array_list.zig index d7327f8bee..cfe77f11b5 100644 --- a/lib/std/multi_array_list.zig +++ b/lib/std/multi_array_list.zig @@ -534,7 +534,7 @@ pub fn MultiArrayList(comptime T: type) type { self.sortInternal(a, b, ctx, .unstable); } - fn capacityInBytes(capacity: usize) usize { + pub fn capacityInBytes(capacity: usize) usize { comptime var elem_bytes: usize = 0; inline for (sizes.bytes) |size| elem_bytes += size; return elem_bytes * capacity; diff --git a/src/Compilation.zig b/src/Compilation.zig index 7e10febf0e..14d109bab3 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -2748,7 +2748,7 @@ const Header = extern struct { items_len: u32, extra_len: u32, limbs_len: u32, - string_bytes_len: u32, + //string_bytes_len: u32, tracked_insts_len: u32, src_hash_deps_len: u32, decl_val_deps_len: u32, @@ -2777,7 +2777,7 @@ pub fn saveState(comp: *Compilation) !void { .items_len = @intCast(ip.items.len), .extra_len = @intCast(ip.extra.items.len), .limbs_len = @intCast(ip.limbs.items.len), - .string_bytes_len = @intCast(ip.string_bytes.items.len), + //.string_bytes_len = @intCast(ip.string_bytes.items.len), .tracked_insts_len = @intCast(ip.tracked_insts.count()), .src_hash_deps_len = @intCast(ip.src_hash_deps.count()), .decl_val_deps_len = @intCast(ip.decl_val_deps.count()), @@ -2794,7 +2794,7 @@ pub fn saveState(comp: *Compilation) !void { addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.extra.items)); addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.items.items(.data))); addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.items.items(.tag))); - addBuf(&bufs_list, &bufs_len, ip.string_bytes.items); + //addBuf(&bufs_list, &bufs_len, ip.string_bytes.items); addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.tracked_insts.keys())); addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.src_hash_deps.keys())); diff --git a/src/InternPool.zig b/src/InternPool.zig index f0141a9092..6b875c2288 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -2,9 +2,11 @@ //! This data structure is self-contained, with the following exceptions: //! 
* Module.Namespace has a pointer to Module.File -local: []Local = &.{}, -shard_shift: std.math.Log2Int(usize) = 0, +locals: []Local = &.{}, shards: []Shard = &.{}, +tid_width: std.math.Log2Int(u32) = 0, +tid_shift_31: std.math.Log2Int(u32) = 31, +tid_shift_32: std.math.Log2Int(u32) = 31, items: std.MultiArrayList(Item) = .{}, extra: std.ArrayListUnmanaged(u32) = .{}, @@ -13,12 +15,6 @@ extra: std.ArrayListUnmanaged(u32) = .{}, /// Use the helper methods instead of accessing this directly in order to not /// violate the above mechanism. limbs: std.ArrayListUnmanaged(u64) = .{}, -/// In order to store references to strings in fewer bytes, we copy all -/// string bytes into here. String bytes can be null. It is up to whomever -/// is referencing the data here whether they want to store both index and length, -/// thus allowing null bytes, or store only index, and use null-termination. The -/// `string_bytes` array is agnostic to either usage. -string_bytes: std.ArrayListUnmanaged(u8) = .{}, /// Rather than allocating Decl objects with an Allocator, we instead allocate /// them with this SegmentedList. This provides four advantages: @@ -345,52 +341,237 @@ pub const DepEntry = extern struct { }; const Local = struct { - aligned: void align(std.atomic.cache_line) = {}, + shared: Shared align(std.atomic.cache_line), + mutate: struct { + arena: std.heap.ArenaAllocator.State, + strings: Mutate, + } align(std.atomic.cache_line), - /// header: List.Header, - /// data: [capacity]u32, - /// tag: [header.capacity]Tag, - items: List, + const Shared = struct { + strings: Strings, + }; - /// header: List.Header, - /// extra: [header.capacity]u32, - extra: List, + const Strings = List(struct { u8 }); - /// header: List.Header, - /// bytes: [header.capacity]u8, - strings: List, + const Mutate = struct { + len: u32, - arena: std.heap.ArenaAllocator.State, + const empty: Mutate = .{ + .len = 0, + }; + }; - const List = struct { - entries: [*]u32, + fn List(comptime Elem: type) type { + assert(@typeInfo(Elem) == .Struct); + return struct { + bytes: [*]align(@alignOf(Elem)) u8, + + const ListSelf = @This(); + const Mutable = struct { + gpa: std.mem.Allocator, + arena: *std.heap.ArenaAllocator.State, + mutate: *Mutate, + list: *ListSelf, + + const fields = std.enums.values(std.meta.FieldEnum(Elem)); + + fn Slice(comptime opts: struct { is_const: bool = false }) type { + const elem_info = @typeInfo(Elem).Struct; + const elem_fields = elem_info.fields; + var new_fields: [elem_fields.len]std.builtin.Type.StructField = undefined; + for (&new_fields, elem_fields) |*new_field, elem_field| new_field.* = .{ + .name = elem_field.name, + .type = @Type(.{ .Pointer = .{ + .size = .Slice, + .is_const = opts.is_const, + .is_volatile = false, + .alignment = 0, + .address_space = .generic, + .child = elem_field.type, + .is_allowzero = false, + .sentinel = null, + } }), + .default_value = null, + .is_comptime = false, + .alignment = 0, + }; + return @Type(.{ .Struct = .{ + .layout = .auto, + .fields = &new_fields, + .decls = &.{}, + .is_tuple = elem_info.is_tuple, + } }); + } - const empty: List = .{ .entries = @constCast(&(extern struct { - header: Header, - entries: [0]u32, - }{ - .header = .{ .len = 0, .capacity = 0 }, - .entries = .{}, - }).entries) }; + pub fn appendAssumeCapacity(mutable: Mutable, elem: Elem) void { + var mutable_view = mutable.view(); + defer mutable.lenPtr().* = @intCast(mutable_view.len); + mutable_view.appendAssumeCapacity(elem); + } - fn acquire(list: *const List) List { - return .{ .entries = 
@atomicLoad([*]u32, &list.entries, .acquire) }; - } - fn release(list: *List, new_list: List) void { - @atomicStore([*]u32, &list.entries, new_list.entries, .release); - } + pub fn appendSliceAssumeCapacity( + mutable: Mutable, + slice: Slice(.{ .is_const = true }), + ) void { + if (fields.len == 0) return; + const mutable_len = mutable.lenPtr(); + const start = mutable_len.*; + const slice_len = @field(slice, @tagName(fields[0])).len; + assert(slice_len < mutable.capacityPtr().* - start); + mutable_len.* = @intCast(start + slice_len); + const mutable_view = mutable.view(); + inline for (fields) |field| { + const field_slice = @field(slice, @tagName(field)); + assert(field_slice.len == slice_len); + @memcpy(mutable_view.items(field)[start..][0..slice_len], field_slice); + } + } - const Header = extern struct { - len: u32, - capacity: u32, + pub fn appendNTimes(mutable: Mutable, elem: Elem, len: usize) Allocator.Error!void { + try mutable.ensureUnusedCapacity(len); + mutable.appendNTimesAssumeCapacity(elem, len); + } + + pub fn appendNTimesAssumeCapacity(mutable: Mutable, elem: Elem, len: usize) void { + const mutable_len = mutable.lenPtr(); + const start = mutable_len.*; + assert(len <= mutable.capacityPtr().* - start); + mutable_len.* = @intCast(start + len); + const mutable_view = mutable.view(); + inline for (fields) |field| { + @memset(mutable_view.items(field)[start..][0..len], @field(elem, @tagName(field))); + } + } + + pub fn addManyAsSlice(mutable: Mutable, len: usize) Allocator.Error!Slice(.{}) { + try mutable.ensureUnusedCapacity(len); + return mutable.addManyAsSliceAssumeCapacity(len); + } + + pub fn addManyAsSliceAssumeCapacity(mutable: Mutable, len: usize) Slice(.{}) { + const mutable_len = mutable.lenPtr(); + const start = mutable_len.*; + assert(len <= mutable.capacityPtr().* - start); + mutable_len.* = @intCast(start + len); + const mutable_view = mutable.view(); + var slice: Slice(.{}) = undefined; + inline for (fields) |field| { + @field(slice, @tagName(field)) = mutable_view.items(field)[start..][0..len]; + } + return slice; + } + + pub fn shrinkRetainingCapacity(mutable: Mutable, len: usize) void { + const mutable_len = mutable.lenPtr(); + assert(len <= mutable_len.*); + mutable_len.* = @intCast(len); + } + + pub fn ensureUnusedCapacity(mutable: Mutable, unused_capacity: usize) Allocator.Error!void { + try mutable.ensureTotalCapacity(@intCast(mutable.lenPtr().* + unused_capacity)); + } + + pub fn ensureTotalCapacity(mutable: Mutable, total_capacity: usize) Allocator.Error!void { + const old_capacity = mutable.capacityPtr().*; + if (old_capacity >= total_capacity) return; + var new_capacity = old_capacity; + while (new_capacity < total_capacity) new_capacity = (new_capacity + 10) * 2; + try mutable.setCapacity(new_capacity); + } + + fn setCapacity(mutable: Mutable, capacity: u32) Allocator.Error!void { + var arena = mutable.arena.promote(mutable.gpa); + defer mutable.arena.* = arena.state; + const buf = try arena.allocator().alignedAlloc( + u8, + alignment, + bytes_offset + View.capacityInBytes(capacity), + ); + var new_list: ListSelf = .{ .bytes = @ptrCast(buf[bytes_offset..].ptr) }; + new_list.header().* = .{ .capacity = capacity }; + const len = mutable.lenPtr().*; + const old_slice = mutable.list.view().slice(); + const new_slice = new_list.view().slice(); + inline for (fields) |field| { + @memcpy(new_slice.items(field)[0..len], old_slice.items(field)[0..len]); + } + mutable.list.release(new_list); + } + + fn view(mutable: Mutable) View { + return .{ + .bytes = 
mutable.list.bytes, + .len = mutable.lenPtr().*, + .capacity = mutable.capacityPtr().*, + }; + } + + pub fn lenPtr(mutable: Mutable) *u32 { + return &mutable.mutate.len; + } + + pub fn capacityPtr(mutable: Mutable) *u32 { + return &mutable.list.header().capacity; + } + }; + + const empty: ListSelf = .{ .bytes = @constCast(&(extern struct { + header: Header, + bytes: [0]u8, + }{ + .header = .{ .capacity = 0 }, + .bytes = .{}, + }).bytes) }; - const fields_len = @typeInfo(Header).Struct.fields.len; + const alignment = @max(@alignOf(Header), @alignOf(Elem)); + const bytes_offset = std.mem.alignForward(usize, @sizeOf(Header), @alignOf(Elem)); + const View = std.MultiArrayList(Elem); + + fn acquire(list: *const ListSelf) ListSelf { + return .{ .bytes = @atomicLoad([*]align(@alignOf(Elem)) u8, &list.bytes, .acquire) }; + } + fn release(list: *ListSelf, new_list: ListSelf) void { + @atomicStore([*]align(@alignOf(Elem)) u8, &list.bytes, new_list.bytes, .release); + } + + const Header = extern struct { + capacity: u32, + }; + fn header(list: ListSelf) *Header { + return @ptrFromInt(@intFromPtr(list.bytes) - bytes_offset); + } + + fn view(list: ListSelf) View { + const capacity = list.header().capacity; + return .{ + .bytes = list.bytes, + .len = capacity, + .capacity = capacity, + }; + } }; - fn header(list: List) *Header { - return @ptrCast(list.entries - Header.fields_len); - } - }; + } + + /// In order to store references to strings in fewer bytes, we copy all + /// string bytes into here. String bytes can be null. It is up to whomever + /// is referencing the data here whether they want to store both index and length, + /// thus allowing null bytes, or store only index, and use null-termination. The + /// `strings` array is agnostic to either usage. + pub fn getMutableStrings(local: *Local, gpa: std.mem.Allocator) Strings.Mutable { + return .{ + .gpa = gpa, + .arena = &local.mutate.arena, + .mutate = &local.mutate.strings, + .list = &local.shared.strings, + }; + } }; +pub fn getLocal(ip: *InternPool, tid: Zcu.PerThread.Id) *Local { + return &ip.locals[@intFromEnum(tid)]; +} +pub fn getLocalShared(ip: *const InternPool, tid: Zcu.PerThread.Id) *const Local.Shared { + return &ip.locals[@intFromEnum(tid)].shared; +} const Shard = struct { shared: struct { @@ -448,7 +629,7 @@ const Shard = struct { } }; fn header(map: @This()) *Header { - return &(@as([*]Header, @ptrCast(map.entries)) - 1)[0]; + return @ptrFromInt(@intFromPtr(map.entries) - entries_offset); } const Entry = extern struct { @@ -465,6 +646,17 @@ const Shard = struct { }; } }; +fn getShard(ip: *InternPool, tid: Zcu.PerThread.Id) *Shard { + return &ip.shards[@intFromEnum(tid)]; +} + +fn getTidMask(ip: *const InternPool) u32 { + assert(std.math.isPowerOfTwo(ip.shards.len)); + return @intCast(ip.shards.len - 1); +} +fn getIndexMask(ip: *const InternPool, comptime BackingInt: type) u32 { + return @as(u32, std.math.maxInt(BackingInt)) >> ip.tid_width; +} const FieldMap = std.ArrayHashMapUnmanaged(void, void, std.array_hash_map.AutoContext(void), false); @@ -560,18 +752,18 @@ pub const OptionalNamespaceIndex = enum(u32) { } }; -/// An index into `string_bytes`. +/// An index into `strings`. pub const String = enum(u32) { /// An empty string. 
empty = 0, _, pub fn toSlice(string: String, len: u64, ip: *const InternPool) []const u8 { - return ip.string_bytes.items[@intFromEnum(string)..][0..@intCast(len)]; + return string.toOverlongSlice(ip)[0..@intCast(len)]; } pub fn at(string: String, index: u64, ip: *const InternPool) u8 { - return ip.string_bytes.items[@intCast(@intFromEnum(string) + index)]; + return string.toOverlongSlice(ip)[@intCast(index)]; } pub fn toNullTerminatedString(string: String, len: u64, ip: *const InternPool) NullTerminatedString { @@ -579,9 +771,32 @@ pub const String = enum(u32) { assert(string.at(len, ip) == 0); return @enumFromInt(@intFromEnum(string)); } + + const Unwrapped = struct { + tid: Zcu.PerThread.Id, + index: u32, + + fn wrap(unwrapped: Unwrapped, ip: *const InternPool) String { + assert(@intFromEnum(unwrapped.tid) <= ip.getTidMask()); + assert(unwrapped.index <= ip.getIndexMask(u32)); + return @enumFromInt(@intFromEnum(unwrapped.tid) << ip.tid_shift_32 | unwrapped.index); + } + }; + fn unwrap(string: String, ip: *const InternPool) Unwrapped { + return .{ + .tid = @enumFromInt(@intFromEnum(string) >> ip.tid_shift_32 & ip.getTidMask()), + .index = @intFromEnum(string) & ip.getIndexMask(u32), + }; + } + + fn toOverlongSlice(string: String, ip: *const InternPool) []const u8 { + const unwrapped = string.unwrap(ip); + const strings = ip.getLocalShared(unwrapped.tid).strings.acquire(); + return strings.view().items(.@"0")[unwrapped.index..]; + } }; -/// An index into `string_bytes` which might be `none`. +/// An index into `strings` which might be `none`. pub const OptionalString = enum(u32) { /// This is distinct from `none` - it is a valid index that represents empty string. empty = 0, @@ -597,7 +812,7 @@ pub const OptionalString = enum(u32) { } }; -/// An index into `string_bytes`. +/// An index into `strings`. pub const NullTerminatedString = enum(u32) { /// An empty string. empty = 0, @@ -623,12 +838,8 @@ pub const NullTerminatedString = enum(u32) { return @enumFromInt(@intFromEnum(self)); } - fn toOverlongSlice(string: NullTerminatedString, ip: *const InternPool) []const u8 { - return ip.string_bytes.items[@intFromEnum(string)..]; - } - pub fn toSlice(string: NullTerminatedString, ip: *const InternPool) [:0]const u8 { - const overlong_slice = string.toOverlongSlice(ip); + const overlong_slice = string.toString().toOverlongSlice(ip); return overlong_slice[0..std.mem.indexOfScalar(u8, overlong_slice, 0).? :0]; } @@ -637,7 +848,7 @@ pub const NullTerminatedString = enum(u32) { } pub fn eqlSlice(string: NullTerminatedString, slice: []const u8, ip: *const InternPool) bool { - const overlong_slice = string.toOverlongSlice(ip); + const overlong_slice = string.toString().toOverlongSlice(ip); return overlong_slice.len > slice.len and std.mem.eql(u8, overlong_slice[0..slice.len], slice) and overlong_slice[slice.len] == 0; @@ -688,12 +899,12 @@ pub const NullTerminatedString = enum(u32) { } else @compileError("invalid format string '" ++ specifier ++ "' for '" ++ @typeName(NullTerminatedString) ++ "'"); } - pub fn fmt(self: NullTerminatedString, ip: *const InternPool) std.fmt.Formatter(format) { - return .{ .data = .{ .string = self, .ip = ip } }; + pub fn fmt(string: NullTerminatedString, ip: *const InternPool) std.fmt.Formatter(format) { + return .{ .data = .{ .string = string, .ip = ip } }; } }; -/// An index into `string_bytes` which might be `none`. +/// An index into `strings` which might be `none`. 
pub const OptionalNullTerminatedString = enum(u32) { /// This is distinct from `none` - it is a valid index that represents empty string. empty = 0, @@ -4077,7 +4288,7 @@ pub const FuncAnalysis = packed struct(u32) { pub const Bytes = struct { /// The type of the aggregate ty: Index, - /// Index into string_bytes, of len ip.aggregateTypeLen(ty) + /// Index into strings, of len ip.aggregateTypeLen(ty) bytes: String, }; @@ -4647,16 +4858,21 @@ pub fn init(ip: *InternPool, gpa: Allocator, total_threads: usize) !void { errdefer ip.deinit(gpa); assert(ip.items.len == 0); - ip.local = try gpa.alloc(Local, total_threads); - @memset(ip.local, .{ - .items = Local.List.empty, - .extra = Local.List.empty, - .strings = Local.List.empty, - .arena = .{}, + ip.locals = try gpa.alloc(Local, total_threads); + @memset(ip.locals, .{ + .shared = .{ + .strings = Local.Strings.empty, + }, + .mutate = .{ + .arena = .{}, + .strings = Local.Mutate.empty, + }, }); - ip.shard_shift = @intCast(std.math.log2_int_ceil(usize, total_threads)); - ip.shards = try gpa.alloc(Shard, @as(usize, 1) << ip.shard_shift); + ip.tid_width = @intCast(std.math.log2_int_ceil(usize, total_threads)); + ip.tid_shift_31 = 31 - ip.tid_width; + ip.tid_shift_32 = ip.tid_shift_31 +| 1; + ip.shards = try gpa.alloc(Shard, @as(usize, 1) << ip.tid_width); @memset(ip.shards, .{ .shared = .{ .map = Shard.Map(Index).empty, @@ -4705,7 +4921,6 @@ pub fn deinit(ip: *InternPool, gpa: Allocator) void { ip.items.deinit(gpa); ip.extra.deinit(gpa); ip.limbs.deinit(gpa); - ip.string_bytes.deinit(gpa); ip.decls_free_list.deinit(gpa); ip.allocated_decls.deinit(gpa); @@ -4732,8 +4947,8 @@ pub fn deinit(ip: *InternPool, gpa: Allocator) void { ip.files.deinit(gpa); gpa.free(ip.shards); - for (ip.local) |*local| local.arena.promote(gpa).deinit(); - gpa.free(ip.local); + for (ip.locals) |*local| local.mutate.arena.promote(gpa).deinit(); + gpa.free(ip.locals); ip.* = undefined; } @@ -5437,8 +5652,9 @@ fn getOrPutKey( } const map_header = map.header().*; if (shard.mutate.map.len >= map_header.capacity * 3 / 5) { - var arena = ip.local[@intFromEnum(tid)].arena.promote(gpa); - defer ip.local[@intFromEnum(tid)].arena = arena.state; + const arena_state = &ip.getLocal(tid).mutate.arena; + var arena = arena_state.promote(gpa); + defer arena_state.* = arena.state; const new_map_capacity = map_header.capacity * 2; const new_map_buf = try arena.allocator().alignedAlloc( u8, @@ -6194,33 +6410,32 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All } if (child == .u8_type) bytes: { - const string_bytes_index = ip.string_bytes.items.len; - try ip.string_bytes.ensureUnusedCapacity(gpa, @intCast(len_including_sentinel + 1)); + const strings = ip.getLocal(tid).getMutableStrings(gpa); + const start = strings.lenPtr().*; + try strings.ensureUnusedCapacity(@intCast(len_including_sentinel + 1)); try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Bytes).Struct.fields.len); switch (aggregate.storage) { - .bytes => |bytes| ip.string_bytes.appendSliceAssumeCapacity(bytes.toSlice(len, ip)), + .bytes => |bytes| strings.appendSliceAssumeCapacity(.{bytes.toSlice(len, ip)}), .elems => |elems| for (elems[0..@intCast(len)]) |elem| switch (ip.indexToKey(elem)) { .undef => { - ip.string_bytes.shrinkRetainingCapacity(string_bytes_index); + strings.shrinkRetainingCapacity(start); break :bytes; }, - .int => |int| ip.string_bytes.appendAssumeCapacity( - @intCast(int.storage.u64), - ), + .int => |int| strings.appendAssumeCapacity(.{@intCast(int.storage.u64)}), else => 
unreachable, }, .repeated_elem => |elem| switch (ip.indexToKey(elem)) { .undef => break :bytes, .int => |int| @memset( - ip.string_bytes.addManyAsSliceAssumeCapacity(@intCast(len)), + strings.addManyAsSliceAssumeCapacity(@intCast(len))[0], @intCast(int.storage.u64), ), else => unreachable, }, } - if (sentinel != .none) ip.string_bytes.appendAssumeCapacity( + if (sentinel != .none) strings.appendAssumeCapacity(.{ @intCast(ip.indexToKey(sentinel).int.storage.u64), - ); + }); const string = try ip.getOrPutTrailingString( gpa, tid, @@ -9050,10 +9265,11 @@ pub fn getOrPutString( slice: []const u8, comptime embedded_nulls: EmbeddedNulls, ) Allocator.Error!embedded_nulls.StringType() { - try ip.string_bytes.ensureUnusedCapacity(gpa, slice.len + 1); - ip.string_bytes.appendSliceAssumeCapacity(slice); - ip.string_bytes.appendAssumeCapacity(0); - return ip.getOrPutTrailingString(gpa, tid, slice.len + 1, embedded_nulls); + const strings = ip.getLocal(tid).getMutableStrings(gpa); + try strings.ensureUnusedCapacity(slice.len + 1); + strings.appendSliceAssumeCapacity(.{slice}); + strings.appendAssumeCapacity(.{0}); + return ip.getOrPutTrailingString(gpa, tid, @intCast(slice.len + 1), embedded_nulls); } pub fn getOrPutStringFmt( @@ -9064,11 +9280,12 @@ pub fn getOrPutStringFmt( args: anytype, comptime embedded_nulls: EmbeddedNulls, ) Allocator.Error!embedded_nulls.StringType() { - // ensure that references to string_bytes in args do not get invalidated - const len: usize = @intCast(std.fmt.count(format, args) + 1); - try ip.string_bytes.ensureUnusedCapacity(gpa, len); - ip.string_bytes.writer(undefined).print(format, args) catch unreachable; - ip.string_bytes.appendAssumeCapacity(0); + // ensure that references to strings in args do not get invalidated + const format_z = format ++ .{0}; + const len: u32 = @intCast(std.fmt.count(format_z, args)); + const strings = ip.getLocal(tid).getMutableStrings(gpa); + const slice = try strings.addManyAsSlice(len); + assert((std.fmt.bufPrint(slice[0], format_z, args) catch unreachable).len == len); return ip.getOrPutTrailingString(gpa, tid, len, embedded_nulls); } @@ -9083,47 +9300,33 @@ pub fn getOrPutStringOpt( return string.toOptional(); } -/// Uses the last len bytes of ip.string_bytes as the key. +/// Uses the last len bytes of strings as the key. 
pub fn getOrPutTrailingString( ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, - len: usize, + len: u32, comptime embedded_nulls: EmbeddedNulls, ) Allocator.Error!embedded_nulls.StringType() { - const string_bytes = &ip.string_bytes; - const str_index: u32 = @intCast(string_bytes.items.len - len); - if (len > 0 and string_bytes.getLast() == 0) { - _ = string_bytes.pop(); + const strings = ip.getLocal(tid).getMutableStrings(gpa); + const start: u32 = @intCast(strings.lenPtr().* - len); + if (len > 0 and strings.view().items(.@"0")[strings.lenPtr().* - 1] == 0) { + strings.lenPtr().* -= 1; } else { - try string_bytes.ensureUnusedCapacity(gpa, 1); + try strings.ensureUnusedCapacity(1); } - const key: []const u8 = string_bytes.items[str_index..]; + const key: []const u8 = strings.view().items(.@"0")[start..]; + const value: embedded_nulls.StringType() = + @enumFromInt(@intFromEnum(tid) << ip.tid_shift_32 | start); const has_embedded_null = std.mem.indexOfScalar(u8, key, 0) != null; switch (embedded_nulls) { .no_embedded_nulls => assert(!has_embedded_null), .maybe_embedded_nulls => if (has_embedded_null) { - string_bytes.appendAssumeCapacity(0); - return @enumFromInt(str_index); + strings.appendAssumeCapacity(.{0}); + return value; }, } - const maybe_existing_index = try ip.getOrPutStringValue(gpa, tid, key, @enumFromInt(str_index)); - if (maybe_existing_index.unwrap()) |existing_index| { - string_bytes.shrinkRetainingCapacity(str_index); - return @enumFromInt(@intFromEnum(existing_index)); - } else { - string_bytes.appendAssumeCapacity(0); - return @enumFromInt(str_index); - } -} -fn getOrPutStringValue( - ip: *InternPool, - gpa: Allocator, - tid: Zcu.PerThread.Id, - key: []const u8, - value: NullTerminatedString, -) Allocator.Error!OptionalNullTerminatedString { const full_hash = Hash.hash(0, key); const hash: u32 = @truncate(full_hash >> 32); const shard = &ip.shards[@intCast(full_hash & (ip.shards.len - 1))]; @@ -9136,7 +9339,9 @@ fn getOrPutStringValue( const entry = &map.entries[map_index]; const index = entry.acquire().unwrap() orelse break; if (entry.hash != hash) continue; - if (index.eqlSlice(key, ip)) return index.toOptional(); + if (!index.eqlSlice(key, ip)) continue; + strings.shrinkRetainingCapacity(start); + return @enumFromInt(@intFromEnum(index)); } shard.mutate.string_map.mutex.lock(); defer shard.mutate.string_map.mutex.unlock(); @@ -9151,18 +9356,22 @@ fn getOrPutStringValue( const entry = &map.entries[map_index]; const index = entry.acquire().unwrap() orelse break; if (entry.hash != hash) continue; - if (index.eqlSlice(key, ip)) return index.toOptional(); + if (!index.eqlSlice(key, ip)) continue; + strings.shrinkRetainingCapacity(start); + return @enumFromInt(@intFromEnum(index)); } defer shard.mutate.string_map.len += 1; const map_header = map.header().*; if (shard.mutate.string_map.len < map_header.capacity * 3 / 5) { const entry = &map.entries[map_index]; entry.hash = hash; - entry.release(value.toOptional()); - return .none; + entry.release(@enumFromInt(@intFromEnum(value))); + strings.appendAssumeCapacity(.{0}); + return value; } - var arena = ip.local[@intFromEnum(tid)].arena.promote(gpa); - defer ip.local[@intFromEnum(tid)].arena = arena.state; + const arena_state = &ip.getLocal(tid).mutate.arena; + var arena = arena_state.promote(gpa); + defer arena_state.* = arena.state; const new_map_capacity = map_header.capacity * 2; const new_map_buf = try arena.allocator().alignedAlloc( u8, @@ -9197,11 +9406,12 @@ fn getOrPutStringValue( if 
(map.entries[map_index].value == .none) break; } map.entries[map_index] = .{ - .value = value.toOptional(), + .value = @enumFromInt(@intFromEnum(value)), .hash = hash, }; shard.shared.string_map.release(new_map); - return .none; + strings.appendAssumeCapacity(.{0}); + return value; } pub fn getString(ip: *InternPool, key: []const u8) OptionalNullTerminatedString { diff --git a/src/Value.zig b/src/Value.zig index e47598fe0a..c3e4b05fcb 100644 --- a/src/Value.zig +++ b/src/Value.zig @@ -65,8 +65,9 @@ pub fn toIpString(val: Value, ty: Type, pt: Zcu.PerThread) !InternPool.NullTermi .elems => return arrayToIpString(val, ty.arrayLen(mod), pt), .repeated_elem => |elem| { const byte: u8 = @intCast(Value.fromInterned(elem).toUnsignedInt(pt)); - const len: usize = @intCast(ty.arrayLen(mod)); - try ip.string_bytes.appendNTimes(mod.gpa, byte, len); + const len: u32 = @intCast(ty.arrayLen(mod)); + const strings = ip.getLocal(pt.tid).getMutableStrings(mod.gpa); + try strings.appendNTimes(.{byte}, len); return ip.getOrPutTrailingString(mod.gpa, pt.tid, len, .no_embedded_nulls); }, } @@ -107,16 +108,18 @@ fn arrayToIpString(val: Value, len_u64: u64, pt: Zcu.PerThread) !InternPool.Null const mod = pt.zcu; const gpa = mod.gpa; const ip = &mod.intern_pool; - const len: usize = @intCast(len_u64); - try ip.string_bytes.ensureUnusedCapacity(gpa, len); + const len: u32 = @intCast(len_u64); + const strings = ip.getLocal(pt.tid).getMutableStrings(gpa); + const strings_len = strings.lenPtr(); + try strings.ensureUnusedCapacity(len); for (0..len) |i| { // I don't think elemValue has the possibility to affect ip.string_bytes. Let's // assert just to be sure. - const prev = ip.string_bytes.items.len; + const prev_len = strings_len.*; const elem_val = try val.elemValue(pt, i); - assert(ip.string_bytes.items.len == prev); + assert(strings_len.* == prev_len); const byte: u8 = @intCast(elem_val.toUnsignedInt(pt)); - ip.string_bytes.appendAssumeCapacity(byte); + strings.appendAssumeCapacity(.{byte}); } return ip.getOrPutTrailingString(gpa, pt.tid, len, .no_embedded_nulls); } diff --git a/src/Zcu.zig b/src/Zcu.zig index 32c9045910..04ba7cc328 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -693,38 +693,39 @@ pub const Namespace = struct { ) !InternPool.NullTerminatedString { const zcu = pt.zcu; const ip = &zcu.intern_pool; - const count = count: { + + const gpa = zcu.gpa; + const strings = ip.getLocal(pt.tid).getMutableStrings(gpa); + // Protects reads of interned strings from being reallocated during the call to + // renderFullyQualifiedName. + const slice = try strings.addManyAsSlice(count: { var count: usize = name.length(ip) + 1; var cur_ns = &ns; while (true) { const decl = zcu.declPtr(cur_ns.decl_index); - count += decl.name.length(ip) + 1; cur_ns = zcu.namespacePtr(cur_ns.parent.unwrap() orelse { - count += ns.fileScope(zcu).sub_file_path.len; + count += ns.fileScope(zcu).fullyQualifiedNameLen(); break :count count; }); + count += decl.name.length(ip) + 1; } - }; - - const gpa = zcu.gpa; - const start = ip.string_bytes.items.len; - // Protects reads of interned strings from being reallocated during the call to - // renderFullyQualifiedName. - try ip.string_bytes.ensureUnusedCapacity(gpa, count); - ns.renderFullyQualifiedName(zcu, name, ip.string_bytes.writer(gpa)) catch unreachable; + }); + var fbs = std.io.fixedBufferStream(slice[0]); + ns.renderFullyQualifiedName(zcu, name, fbs.writer()) catch unreachable; + assert(fbs.pos == slice[0].len); // Sanitize the name for nvptx which is more restrictive. 
// TODO This should be handled by the backend, not the frontend. Have a // look at how the C backend does it for inspiration. const cpu_arch = zcu.root_mod.resolved_target.result.cpu.arch; if (cpu_arch.isNvptx()) { - for (ip.string_bytes.items[start..]) |*byte| switch (byte.*) { + for (slice[0]) |*byte| switch (byte.*) { '{', '}', '*', '[', ']', '(', ')', ',', ' ', '\'' => byte.* = '_', else => {}, }; } - return ip.getOrPutTrailingString(gpa, pt.tid, ip.string_bytes.items.len - start, .no_embedded_nulls); + return ip.getOrPutTrailingString(gpa, pt.tid, @intCast(slice[0].len), .no_embedded_nulls); } pub fn getType(ns: Namespace, zcu: *Zcu) Type { @@ -859,6 +860,11 @@ pub const File = struct { return &file.tree; } + pub fn fullyQualifiedNameLen(file: File) usize { + const ext = std.fs.path.extension(file.sub_file_path); + return file.sub_file_path.len - ext.len; + } + pub fn renderFullyQualifiedName(file: File, writer: anytype) !void { // Convert all the slashes into dots and truncate the extension. const ext = std.fs.path.extension(file.sub_file_path); @@ -879,9 +885,12 @@ pub const File = struct { pub fn fullyQualifiedName(file: File, pt: Zcu.PerThread) !InternPool.NullTerminatedString { const gpa = pt.zcu.gpa; const ip = &pt.zcu.intern_pool; - const start = ip.string_bytes.items.len; - try file.renderFullyQualifiedName(ip.string_bytes.writer(gpa)); - return ip.getOrPutTrailingString(gpa, pt.tid, ip.string_bytes.items.len - start, .no_embedded_nulls); + const strings = ip.getLocal(pt.tid).getMutableStrings(gpa); + const slice = try strings.addManyAsSlice(file.fullyQualifiedNameLen()); + var fbs = std.io.fixedBufferStream(slice[0]); + file.renderFullyQualifiedName(fbs.writer()) catch unreachable; + assert(fbs.pos == slice[0].len); + return ip.getOrPutTrailingString(gpa, pt.tid, @intCast(slice[0].len), .no_embedded_nulls); } pub fn fullPath(file: File, ally: Allocator) ![]u8 { diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index 8cf6922345..2d2be29909 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -1377,10 +1377,11 @@ fn newEmbedFile( }; const size = std.math.cast(usize, actual_stat.size) orelse return error.Overflow; - const bytes = try ip.string_bytes.addManyAsSlice(gpa, try std.math.add(usize, size, 1)); - const actual_read = try file.readAll(bytes[0..size]); + const strings = ip.getLocal(pt.tid).getMutableStrings(gpa); + const bytes = try strings.addManyAsSlice(try std.math.add(usize, size, 1)); + const actual_read = try file.readAll(bytes[0][0..size]); if (actual_read != size) return error.UnexpectedEndOfFile; - bytes[size] = 0; + bytes[0][size] = 0; const comp = mod.comp; switch (comp.cache_use) { @@ -1389,7 +1390,7 @@ fn newEmbedFile( errdefer gpa.free(copied_resolved_path); whole.cache_manifest_mutex.lock(); defer whole.cache_manifest_mutex.unlock(); - try man.addFilePostContents(copied_resolved_path, bytes[0..size], stat); + try man.addFilePostContents(copied_resolved_path, bytes[0][0..size], stat); }, .incremental => {}, } @@ -1401,7 +1402,7 @@ fn newEmbedFile( } }); const array_val = try pt.intern(.{ .aggregate = .{ .ty = array_ty, - .storage = .{ .bytes = try ip.getOrPutTrailingString(gpa, pt.tid, bytes.len, .maybe_embedded_nulls) }, + .storage = .{ .bytes = try ip.getOrPutTrailingString(gpa, pt.tid, @intCast(bytes[0].len), .maybe_embedded_nulls) }, } }); const ptr_ty = (try pt.ptrType(.{ -- cgit v1.2.3 From 92ddb959a7c8877c98363b27c71cd5ae4b9603f4 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Thu, 4 Jul 2024 03:33:23 -0400 Subject: InternPool: 
implement and use thread-safe list for items --- src/Compilation.zig | 8 +- src/InternPool.zig | 1478 ++++++++++++++++++++++++++----------------------- src/Sema.zig | 25 +- src/Type.zig | 2 +- src/Zcu.zig | 36 -- src/Zcu/PerThread.zig | 47 +- 6 files changed, 859 insertions(+), 737 deletions(-) (limited to 'src/Compilation.zig') diff --git a/src/Compilation.zig b/src/Compilation.zig index 14d109bab3..a54205dddf 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -2745,7 +2745,7 @@ pub fn makeBinFileWritable(comp: *Compilation) !void { const Header = extern struct { intern_pool: extern struct { - items_len: u32, + //items_len: u32, extra_len: u32, limbs_len: u32, //string_bytes_len: u32, @@ -2774,7 +2774,7 @@ pub fn saveState(comp: *Compilation) !void { const ip = &zcu.intern_pool; const header: Header = .{ .intern_pool = .{ - .items_len = @intCast(ip.items.len), + //.items_len = @intCast(ip.items.len), .extra_len = @intCast(ip.extra.items.len), .limbs_len = @intCast(ip.limbs.items.len), //.string_bytes_len = @intCast(ip.string_bytes.items.len), @@ -2792,8 +2792,8 @@ pub fn saveState(comp: *Compilation) !void { addBuf(&bufs_list, &bufs_len, mem.asBytes(&header)); addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.limbs.items)); addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.extra.items)); - addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.items.items(.data))); - addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.items.items(.tag))); + //addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.items.items(.data))); + //addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.items.items(.tag))); //addBuf(&bufs_list, &bufs_len, ip.string_bytes.items); addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.tracked_insts.keys())); diff --git a/src/InternPool.zig b/src/InternPool.zig index 6b875c2288..117b2ceef8 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -8,7 +8,7 @@ tid_width: std.math.Log2Int(u32) = 0, tid_shift_31: std.math.Log2Int(u32) = 31, tid_shift_32: std.math.Log2Int(u32) = 31, -items: std.MultiArrayList(Item) = .{}, +//items: std.MultiArrayList(Item) = .{}, extra: std.ArrayListUnmanaged(u32) = .{}, /// On 32-bit systems, this array is ignored and extra is used for everything. /// On 64-bit systems, this array is used for big integers and associated metadata. 
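The hunks that follow move `items` into the per-thread `Local` state, mirroring what the previous commit did for `strings`. As a reading aid, a minimal sketch of the reader side (this restates the `Index.getTag` accessor added further down; it is illustrative, not an addition to the patch):

fn exampleGetTag(ip: *const InternPool, index: Index) Tag {
    // Resolve which thread owns the item and its slot in that thread's list.
    const unwrapped = index.unwrap(ip);
    // Acquire-load the owning thread's list so it is safe to read while the
    // owning thread keeps appending.
    const items = ip.getLocalShared(unwrapped.tid).items.acquire();
    return items.view().items(.tag)[unwrapped.index];
}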
@@ -344,10 +344,12 @@ const Local = struct { shared: Shared align(std.atomic.cache_line), mutate: struct { arena: std.heap.ArenaAllocator.State, + items: Mutate, strings: Mutate, } align(std.atomic.cache_line), const Shared = struct { + items: List(Item), strings: Strings, }; @@ -403,6 +405,11 @@ const Local = struct { } }); } + pub fn append(mutable: Mutable, elem: Elem) Allocator.Error!void { + try mutable.ensureUnusedCapacity(1); + mutable.appendAssumeCapacity(elem); + } + pub fn appendAssumeCapacity(mutable: Mutable, elem: Elem) void { var mutable_view = mutable.view(); defer mutable.lenPtr().* = @intCast(mutable_view.len); @@ -417,7 +424,7 @@ const Local = struct { const mutable_len = mutable.lenPtr(); const start = mutable_len.*; const slice_len = @field(slice, @tagName(fields[0])).len; - assert(slice_len < mutable.capacityPtr().* - start); + assert(slice_len <= mutable.capacityPtr().* - start); mutable_len.* = @intCast(start + slice_len); const mutable_view = mutable.view(); inline for (fields) |field| { @@ -552,6 +559,15 @@ const Local = struct { }; } + pub fn getMutableItems(local: *Local, gpa: std.mem.Allocator) List(Item).Mutable { + return .{ + .gpa = gpa, + .arena = &local.mutate.arena, + .mutate = &local.mutate.items, + .list = &local.shared.items, + }; + } + /// In order to store references to strings in fewer bytes, we copy all /// string bytes into here. String bytes can be null. It is up to whomever /// is referencing the data here whether they want to store both index and length, @@ -566,9 +582,11 @@ const Local = struct { }; } }; + pub fn getLocal(ip: *InternPool, tid: Zcu.PerThread.Id) *Local { return &ip.locals[@intFromEnum(tid)]; } + pub fn getLocalShared(ip: *const InternPool, tid: Zcu.PerThread.Id) *const Local.Shared { return &ip.locals[@intFromEnum(tid)].shared; } @@ -646,6 +664,7 @@ const Shard = struct { }; } }; + fn getShard(ip: *InternPool, tid: Zcu.PerThread.Id) *Shard { return &ip.shards[@intFromEnum(tid)]; } @@ -654,6 +673,7 @@ fn getTidMask(ip: *const InternPool) u32 { assert(std.math.isPowerOfTwo(ip.shards.len)); return @intCast(ip.shards.len - 1); } + fn getIndexMask(ip: *const InternPool, comptime BackingInt: type) u32 { return @as(u32, std.math.maxInt(BackingInt)) >> ip.tid_width; } @@ -791,8 +811,7 @@ pub const String = enum(u32) { fn toOverlongSlice(string: String, ip: *const InternPool) []const u8 { const unwrapped = string.unwrap(ip); - const strings = ip.getLocalShared(unwrapped.tid).strings.acquire(); - return strings.view().items(.@"0")[unwrapped.index..]; + return ip.getLocalShared(unwrapped.tid).strings.acquire().view().items(.@"0")[unwrapped.index..]; } }; @@ -2309,7 +2328,7 @@ pub const LoadedUnionType = struct { }; pub fn loadUnionType(ip: *const InternPool, index: Index) LoadedUnionType { - const data = ip.items.items(.data)[@intFromEnum(index)]; + const data = index.getData(ip); const type_union = ip.extraDataTrail(Tag.TypeUnion, data); const fields_len = type_union.data.fields_len; @@ -2731,7 +2750,7 @@ pub const LoadedStructType = struct { }; pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType { - const item = ip.items.get(@intFromEnum(index)); + const item = index.getItem(ip); switch (item.tag) { .type_struct => { if (item.data == 0) return .{ @@ -2955,7 +2974,7 @@ const LoadedEnumType = struct { }; pub fn loadEnumType(ip: *const InternPool, index: Index) LoadedEnumType { - const item = ip.items.get(@intFromEnum(index)); + const item = index.getItem(ip); const tag_mode: LoadedEnumType.TagMode = switch 
(item.tag) { .type_enum_auto => { const extra = ip.extraDataTrail(EnumAuto, item.data); @@ -3034,9 +3053,9 @@ pub const LoadedOpaqueType = struct { }; pub fn loadOpaqueType(ip: *const InternPool, index: Index) LoadedOpaqueType { - assert(ip.items.items(.tag)[@intFromEnum(index)] == .type_opaque); - const extra_index = ip.items.items(.data)[@intFromEnum(index)]; - const extra = ip.extraDataTrail(Tag.TypeOpaque, extra_index); + const item = index.getItem(ip); + assert(item.tag == .type_opaque); + const extra = ip.extraDataTrail(Tag.TypeOpaque, item.data); const captures_len = if (extra.data.captures_len == std.math.maxInt(u32)) 0 else @@ -3211,6 +3230,38 @@ pub const Index = enum(u32) { } }; + pub fn getItem(index: Index, ip: *const InternPool) Item { + const unwrapped = index.unwrap(ip); + return ip.getLocalShared(unwrapped.tid).items.acquire().view().get(unwrapped.index); + } + + pub fn getTag(index: Index, ip: *const InternPool) Tag { + const unwrapped = index.unwrap(ip); + return ip.getLocalShared(unwrapped.tid).items.acquire().view().items(.tag)[unwrapped.index]; + } + + pub fn getData(index: Index, ip: *const InternPool) u32 { + const unwrapped = index.unwrap(ip); + return ip.getLocalShared(unwrapped.tid).items.acquire().view().items(.data)[unwrapped.index]; + } + + const Unwrapped = struct { + tid: Zcu.PerThread.Id, + index: u32, + + fn wrap(unwrapped: Unwrapped, ip: *const InternPool) Index { + assert(@intFromEnum(unwrapped.tid) <= ip.getTidMask()); + assert(unwrapped.index <= ip.getIndexMask(u31)); + return @enumFromInt(@intFromEnum(unwrapped.tid) << ip.tid_shift_31 | unwrapped.index); + } + }; + fn unwrap(index: Index, ip: *const InternPool) Unwrapped { + return .{ + .tid = @enumFromInt(@intFromEnum(index) >> ip.tid_shift_31 & ip.getTidMask()), + .index = @intFromEnum(index) & ip.getIndexMask(u31), + }; + } + /// This function is used in the debugger pretty formatters in tools/ to fetch the /// Tag to encoding mapping to facilitate fancy debug printing for this type. /// TODO merge this with `Tag.Payload`. 
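The `Unwrapped` helpers above fix the bit layout of an `Index`: bits above `tid_shift_31` select the owning thread, and the low bits select the slot in that thread's item list. A self-contained example of the same arithmetic, using hypothetical numbers (a `tid_width` of 2, hence a shift of 29, is assumed purely for illustration):

const std = @import("std");

pub fn main() void {
    const tid_shift_31: u5 = 29; // assumed: four threads => tid_width = 2
    const tid: u32 = 3; // owning thread
    const slot: u32 = 42; // position in that thread's item list
    const index = tid << tid_shift_31 | slot; // same packing as Unwrapped.wrap
    std.debug.assert(index >> tid_shift_31 == tid); // recovers the thread
    std.debug.assert(index & ((@as(u32, 1) << tid_shift_31) - 1) == slot); // recovers the slot
}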
@@ -4856,15 +4907,17 @@ pub const MemoizedCall = struct { pub fn init(ip: *InternPool, gpa: Allocator, total_threads: usize) !void { errdefer ip.deinit(gpa); - assert(ip.items.len == 0); + assert(ip.locals.len == 0 and ip.shards.len == 0); ip.locals = try gpa.alloc(Local, total_threads); @memset(ip.locals, .{ .shared = .{ + .items = Local.List(Item).empty, .strings = Local.Strings.empty, }, .mutate = .{ .arena = .{}, + .items = Local.Mutate.empty, .strings = Local.Mutate.empty, }, }); @@ -4918,7 +4971,6 @@ pub fn init(ip: *InternPool, gpa: Allocator, total_threads: usize) !void { } pub fn deinit(ip: *InternPool, gpa: Allocator) void { - ip.items.deinit(gpa); ip.extra.deinit(gpa); ip.limbs.deinit(gpa); @@ -4955,7 +5007,7 @@ pub fn deinit(ip: *InternPool, gpa: Allocator) void { pub fn indexToKey(ip: *const InternPool, index: Index) Key { assert(index != .none); - const item = ip.items.get(@intFromEnum(index)); + const item = index.getItem(ip); const data = item.data; return switch (item.tag) { .removed => unreachable, @@ -5001,8 +5053,10 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { .type_pointer => .{ .ptr_type = ip.extraData(Tag.TypePointer, data) }, .type_slice => { - assert(ip.items.items(.tag)[data] == .type_pointer); - var ptr_info = ip.extraData(Tag.TypePointer, ip.items.items(.data)[data]); + const many_ptr_index: Index = @enumFromInt(data); + const many_ptr_item = many_ptr_index.getItem(ip); + assert(many_ptr_item.tag == .type_pointer); + var ptr_info = ip.extraData(Tag.TypePointer, many_ptr_item.data); ptr_info.flags.size = .Slice; return .{ .ptr_type = ptr_info }; }, @@ -5196,7 +5250,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { .ptr_elem => { // Avoid `indexToKey` recursion by asserting the tag encoding. const info = ip.extraData(PtrBaseIndex, data); - const index_item = ip.items.get(@intFromEnum(info.index)); + const index_item = info.index.getItem(ip); return switch (index_item.tag) { .int_usize => .{ .ptr = .{ .ty = info.ty, .base_addr = .{ .arr_elem = .{ .base = info.base, @@ -5209,7 +5263,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { .ptr_field => { // Avoid `indexToKey` recursion by asserting the tag encoding. 
const info = ip.extraData(PtrBaseIndex, data); - const index_item = ip.items.get(@intFromEnum(info.index)); + const index_item = info.index.getItem(ip); return switch (index_item.tag) { .int_usize => .{ .ptr = .{ .ty = info.ty, .base_addr = .{ .field = .{ .base = info.base, @@ -5326,7 +5380,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { .func_coerced => .{ .func = ip.extraFuncCoerced(data) }, .only_possible_value => { const ty: Index = @enumFromInt(data); - const ty_item = ip.items.get(@intFromEnum(ty)); + const ty_item = ty.getItem(ip); return switch (ty_item.tag) { .type_array_big => { const sentinel = @as( @@ -5557,7 +5611,7 @@ fn extraFuncInstance(ip: *const InternPool, extra_index: u32) Key.Func { fn extraFuncCoerced(ip: *const InternPool, extra_index: u32) Key.Func { const func_coerced = ip.extraData(Tag.FuncCoerced, extra_index); - const sub_item = ip.items.get(@intFromEnum(func_coerced.func)); + const sub_item = func_coerced.func.getItem(ip); var func: Key.Func = switch (sub_item.tag) { .func_instance => ip.extraFuncInstance(sub_item.data), .func_decl => ip.extraFuncDecl(sub_item.data), @@ -5581,21 +5635,30 @@ fn indexToKeyBigInt(ip: *const InternPool, limb_index: u32, positive: bool) Key const GetOrPutKey = union(enum) { existing: Index, new: struct { + ip: *InternPool, + tid: Zcu.PerThread.Id, shard: *Shard, map_index: u32, }, - fn set(gop: *GetOrPutKey, index: Index) Index { + fn put(gop: *GetOrPutKey) Index { + return gop.putAt(0); + } + fn putAt(gop: *GetOrPutKey, offset: u32) Index { switch (gop.*) { .existing => unreachable, .new => |info| { + const index = Index.Unwrapped.wrap(.{ + .tid = info.tid, + .index = info.ip.getLocal(info.tid).mutate.items.len - 1 - offset, + }, info.ip); info.shard.shared.map.entries[info.map_index].release(index); info.shard.mutate.map.len += 1; info.shard.mutate.map.mutex.unlock(); + gop.* = .{ .existing = index }; + return index; }, } - gop.* = .{ .existing = index }; - return index; } fn assign(gop: *GetOrPutKey, new_gop: GetOrPutKey) void { @@ -5692,21 +5755,27 @@ fn getOrPutKey( shard.shared.map.release(new_map); } map.entries[map_index].hash = hash; - return .{ .new = .{ .shard = shard, .map_index = map_index } }; + return .{ .new = .{ + .ip = ip, + .tid = tid, + .shard = shard, + .map_index = map_index, + } }; } pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) Allocator.Error!Index { var gop = try ip.getOrPutKey(gpa, tid, key); defer gop.deinit(); if (gop == .existing) return gop.existing; - try ip.items.ensureUnusedCapacity(gpa, 1); + const items = ip.getLocal(tid).getMutableItems(gpa); + try items.ensureUnusedCapacity(1); switch (key) { .int_type => |int_type| { const t: Tag = switch (int_type.signedness) { .signed => .type_int_signed, .unsigned => .type_int_unsigned, }; - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = t, .data = int_type.bits, }); @@ -5721,18 +5790,18 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All const ptr_type_index = try ip.get(gpa, tid, new_key); gop.assign(try ip.getOrPutKey(gpa, tid, key)); - try ip.items.ensureUnusedCapacity(gpa, 1); - ip.items.appendAssumeCapacity(.{ + try items.ensureUnusedCapacity(1); + items.appendAssumeCapacity(.{ .tag = .type_slice, .data = @intFromEnum(ptr_type_index), }); - return gop.set(@enumFromInt(ip.items.len - 1)); + return gop.put(); } var ptr_type_adjusted = ptr_type; if (ptr_type.flags.size == .C) ptr_type_adjusted.flags.is_allowzero = true; - 
ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .type_pointer, .data = try ip.addExtra(gpa, ptr_type_adjusted), }); @@ -5743,19 +5812,19 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All if (std.math.cast(u32, array_type.len)) |len| { if (array_type.sentinel == .none) { - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .type_array_small, .data = try ip.addExtra(gpa, Vector{ .len = len, .child = array_type.child, }), }); - return gop.set(@enumFromInt(ip.items.len - 1)); + return gop.put(); } } const length = Array.Length.init(array_type.len); - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .type_array_big, .data = try ip.addExtra(gpa, Array{ .len0 = length.a, @@ -5766,7 +5835,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All }); }, .vector_type => |vector_type| { - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .type_vector, .data = try ip.addExtra(gpa, Vector{ .len = vector_type.len, @@ -5776,20 +5845,20 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All }, .opt_type => |payload_type| { assert(payload_type != .none); - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .type_optional, .data = @intFromEnum(payload_type), }); }, .anyframe_type => |payload_type| { // payload_type might be none, indicating the type is `anyframe`. - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .type_anyframe, .data = @intFromEnum(payload_type), }); }, .error_union_type => |error_union_type| { - ip.items.appendAssumeCapacity(if (error_union_type.error_set_type == .anyerror_type) .{ + items.appendAssumeCapacity(if (error_union_type.error_set_type == .anyerror_type) .{ .tag = .type_anyerror_union, .data = @intFromEnum(error_union_type.payload_type), } else .{ @@ -5805,7 +5874,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All addStringsToMap(ip, names_map, names); const names_len = error_set_type.names.len; try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.ErrorSet).Struct.fields.len + names_len); - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .type_error_set, .data = ip.addExtraAssumeCapacity(Tag.ErrorSet{ .names_len = names_len, @@ -5815,26 +5884,26 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All ip.extra.appendSliceAssumeCapacity(@ptrCast(error_set_type.names.get(ip))); }, .inferred_error_set_type => |ies_index| { - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .type_inferred_error_set, .data = @intFromEnum(ies_index), }); }, .simple_type => |simple_type| { - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .simple_type, .data = @intFromEnum(simple_type), }); }, .simple_value => |simple_value| { - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .simple_value, .data = @intFromEnum(simple_value), }); }, .undef => |ty| { assert(ty != .none); - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .undef, .data = @intFromEnum(ty), }); @@ -5853,7 +5922,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All .variable => |variable| { const has_init = variable.init != .none; if (has_init) assert(variable.ty == ip.typeOf(variable.init)); - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .variable, .data = try ip.addExtra(gpa, 
Tag.Variable{ .ty = variable.ty, @@ -5873,7 +5942,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All .slice => |slice| { assert(ip.indexToKey(slice.ty).ptr_type.flags.size == .Slice); assert(ip.indexToKey(ip.typeOf(slice.ptr)).ptr_type.flags.size == .Many); - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .ptr_slice, .data = try ip.addExtra(gpa, PtrSlice{ .ty = slice.ty, @@ -5886,7 +5955,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All .ptr => |ptr| { const ptr_type = ip.indexToKey(ptr.ty).ptr_type; assert(ptr_type.flags.size != .Slice); - ip.items.appendAssumeCapacity(switch (ptr.base_addr) { + items.appendAssumeCapacity(switch (ptr.base_addr) { .decl => |decl| .{ .tag = .ptr_decl, .data = try ip.addExtra(gpa, PtrDecl.init(ptr.ty, decl, ptr.byte_offset)), @@ -5975,8 +6044,8 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All .storage = .{ .u64 = base_index.index }, } }); gop.assign(try ip.getOrPutKey(gpa, tid, key)); - try ip.items.ensureUnusedCapacity(gpa, 1); - ip.items.appendAssumeCapacity(.{ + try items.ensureUnusedCapacity(1); + items.appendAssumeCapacity(.{ .tag = switch (ptr.base_addr) { .arr_elem => .ptr_elem, .field => .ptr_field, @@ -5984,7 +6053,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All }, .data = try ip.addExtra(gpa, PtrBaseIndex.init(ptr.ty, base_index.base, index_index, ptr.byte_offset)), }); - return gop.set(@enumFromInt(ip.items.len - 1)); + return gop.put(); }, }); }, @@ -5992,7 +6061,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All .opt => |opt| { assert(ip.isOptionalType(opt.ty)); assert(opt.val == .none or ip.indexToKey(opt.ty).opt_type == ip.typeOf(opt.val)); - ip.items.appendAssumeCapacity(if (opt.val == .none) .{ + items.appendAssumeCapacity(if (opt.val == .none) .{ .tag = .opt_null, .data = @intFromEnum(opt.ty), } else .{ @@ -6009,7 +6078,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All switch (int.storage) { .u64, .i64, .big_int => {}, .lazy_align, .lazy_size => |lazy_ty| { - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = switch (int.storage) { else => unreachable, .lazy_align => .int_lazy_align, @@ -6020,20 +6089,20 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All .lazy_ty = lazy_ty, }), }); - return gop.set(@enumFromInt(ip.items.len - 1)); + return gop.put(); }, } switch (int.ty) { .u8_type => switch (int.storage) { .big_int => |big_int| { - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .int_u8, .data = big_int.to(u8) catch unreachable, }); break :b; }, inline .u64, .i64 => |x| { - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .int_u8, .data = @as(u8, @intCast(x)), }); @@ -6043,14 +6112,14 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All }, .u16_type => switch (int.storage) { .big_int => |big_int| { - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .int_u16, .data = big_int.to(u16) catch unreachable, }); break :b; }, inline .u64, .i64 => |x| { - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .int_u16, .data = @as(u16, @intCast(x)), }); @@ -6060,14 +6129,14 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All }, .u32_type => switch (int.storage) { .big_int => |big_int| { - 
ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .int_u32, .data = big_int.to(u32) catch unreachable, }); break :b; }, inline .u64, .i64 => |x| { - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .int_u32, .data = @as(u32, @intCast(x)), }); @@ -6078,14 +6147,14 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All .i32_type => switch (int.storage) { .big_int => |big_int| { const casted = big_int.to(i32) catch unreachable; - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .int_i32, .data = @as(u32, @bitCast(casted)), }); break :b; }, inline .u64, .i64 => |x| { - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .int_i32, .data = @as(u32, @bitCast(@as(i32, @intCast(x)))), }); @@ -6096,7 +6165,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All .usize_type => switch (int.storage) { .big_int => |big_int| { if (big_int.to(u32)) |casted| { - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .int_usize, .data = casted, }); @@ -6105,7 +6174,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All }, inline .u64, .i64 => |x| { if (std.math.cast(u32, x)) |casted| { - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .int_usize, .data = casted, }); @@ -6117,14 +6186,14 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All .comptime_int_type => switch (int.storage) { .big_int => |big_int| { if (big_int.to(u32)) |casted| { - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .int_comptime_int_u32, .data = casted, }); break :b; } else |_| {} if (big_int.to(i32)) |casted| { - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .int_comptime_int_i32, .data = @as(u32, @bitCast(casted)), }); @@ -6133,14 +6202,14 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All }, inline .u64, .i64 => |x| { if (std.math.cast(u32, x)) |casted| { - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .int_comptime_int_u32, .data = casted, }); break :b; } if (std.math.cast(i32, x)) |casted| { - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .int_comptime_int_i32, .data = @as(u32, @bitCast(casted)), }); @@ -6154,35 +6223,35 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All switch (int.storage) { .big_int => |big_int| { if (big_int.to(u32)) |casted| { - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .int_small, .data = try ip.addExtra(gpa, IntSmall{ .ty = int.ty, .value = casted, }), }); - return gop.set(@enumFromInt(ip.items.len - 1)); + return gop.put(); } else |_| {} const tag: Tag = if (big_int.positive) .int_positive else .int_negative; - try addInt(ip, gpa, int.ty, tag, big_int.limbs); + try addInt(ip, gpa, tid, int.ty, tag, big_int.limbs); }, inline .u64, .i64 => |x| { if (std.math.cast(u32, x)) |casted| { - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .int_small, .data = try ip.addExtra(gpa, IntSmall{ .ty = int.ty, .value = casted, }), }); - return gop.set(@enumFromInt(ip.items.len - 1)); + return gop.put(); } var buf: [2]Limb = undefined; const big_int = BigIntMutable.init(&buf, x).toConst(); const tag: Tag = if (big_int.positive) .int_positive else .int_negative; - try addInt(ip, gpa, int.ty, tag, big_int.limbs); + try addInt(ip, gpa, tid, int.ty, tag, 
big_int.limbs); }, .lazy_align, .lazy_size => unreachable, } @@ -6190,7 +6259,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All .err => |err| { assert(ip.isErrorSetType(err.ty)); - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .error_set_error, .data = try ip.addExtra(gpa, err), }); @@ -6198,7 +6267,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All .error_union => |error_union| { assert(ip.isErrorUnionType(error_union.ty)); - ip.items.appendAssumeCapacity(switch (error_union.val) { + items.appendAssumeCapacity(switch (error_union.val) { .err_name => |err_name| .{ .tag = .error_union_error, .data = try ip.addExtra(gpa, Key.Error{ @@ -6216,7 +6285,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All }); }, - .enum_literal => |enum_literal| ip.items.appendAssumeCapacity(.{ + .enum_literal => |enum_literal| items.appendAssumeCapacity(.{ .tag = .enum_literal, .data = @intFromEnum(enum_literal), }), @@ -6228,50 +6297,50 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All .enum_type => assert(ip.typeOf(enum_tag.int) == ip.loadEnumType(enum_tag.ty).tag_ty), else => unreachable, } - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .enum_tag, .data = try ip.addExtra(gpa, enum_tag), }); }, - .empty_enum_value => |enum_or_union_ty| ip.items.appendAssumeCapacity(.{ + .empty_enum_value => |enum_or_union_ty| items.appendAssumeCapacity(.{ .tag = .only_possible_value, .data = @intFromEnum(enum_or_union_ty), }), .float => |float| { switch (float.ty) { - .f16_type => ip.items.appendAssumeCapacity(.{ + .f16_type => items.appendAssumeCapacity(.{ .tag = .float_f16, .data = @as(u16, @bitCast(float.storage.f16)), }), - .f32_type => ip.items.appendAssumeCapacity(.{ + .f32_type => items.appendAssumeCapacity(.{ .tag = .float_f32, .data = @as(u32, @bitCast(float.storage.f32)), }), - .f64_type => ip.items.appendAssumeCapacity(.{ + .f64_type => items.appendAssumeCapacity(.{ .tag = .float_f64, .data = try ip.addExtra(gpa, Float64.pack(float.storage.f64)), }), - .f80_type => ip.items.appendAssumeCapacity(.{ + .f80_type => items.appendAssumeCapacity(.{ .tag = .float_f80, .data = try ip.addExtra(gpa, Float80.pack(float.storage.f80)), }), - .f128_type => ip.items.appendAssumeCapacity(.{ + .f128_type => items.appendAssumeCapacity(.{ .tag = .float_f128, .data = try ip.addExtra(gpa, Float128.pack(float.storage.f128)), }), .c_longdouble_type => switch (float.storage) { - .f80 => |x| ip.items.appendAssumeCapacity(.{ + .f80 => |x| items.appendAssumeCapacity(.{ .tag = .float_c_longdouble_f80, .data = try ip.addExtra(gpa, Float80.pack(x)), }), - inline .f16, .f32, .f64, .f128 => |x| ip.items.appendAssumeCapacity(.{ + inline .f16, .f32, .f64, .f128 => |x| items.appendAssumeCapacity(.{ .tag = .float_c_longdouble_f128, .data = try ip.addExtra(gpa, Float128.pack(x)), }), }, - .comptime_float_type => ip.items.appendAssumeCapacity(.{ + .comptime_float_type => items.appendAssumeCapacity(.{ .tag = .float_comptime_float, .data = try ip.addExtra(gpa, Float128.pack(float.storage.f128)), }), @@ -6331,11 +6400,11 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All } if (len == 0) { - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .only_possible_value, .data = @intFromEnum(aggregate.ty), }); - return gop.set(@enumFromInt(ip.items.len - 1)); + return gop.put(); } switch (ty_key) { @@ -6364,11 +6433,11 @@ pub 
fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All // This encoding works thanks to the fact that, as we just verified, // the type itself contains a slice of values that can be provided // in the aggregate fields. - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .only_possible_value, .data = @intFromEnum(aggregate.ty), }); - return gop.set(@enumFromInt(ip.items.len - 1)); + return gop.put(); }, else => {}, } @@ -6388,7 +6457,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All .storage = .{ .u64 = bytes.at(0, ip) }, } }); gop.assign(try ip.getOrPutKey(gpa, tid, key)); - try ip.items.ensureUnusedCapacity(gpa, 1); + try items.ensureUnusedCapacity(1); break :elem elem; }, .elems => |elems| elems[0], @@ -6399,14 +6468,14 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All gpa, @typeInfo(Repeated).Struct.fields.len, ); - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .repeated, .data = ip.addExtraAssumeCapacity(Repeated{ .ty = aggregate.ty, .elem_val = elem, }), }); - return gop.set(@enumFromInt(ip.items.len - 1)); + return gop.put(); } if (child == .u8_type) bytes: { @@ -6442,21 +6511,21 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All @intCast(len_including_sentinel), .maybe_embedded_nulls, ); - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .bytes, .data = ip.addExtraAssumeCapacity(Bytes{ .ty = aggregate.ty, .bytes = string, }), }); - return gop.set(@enumFromInt(ip.items.len - 1)); + return gop.put(); } try ip.extra.ensureUnusedCapacity( gpa, @typeInfo(Tag.Aggregate).Struct.fields.len + @as(usize, @intCast(len_including_sentinel + 1)), ); - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .aggregate, .data = ip.addExtraAssumeCapacity(Tag.Aggregate{ .ty = aggregate.ty, @@ -6469,7 +6538,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All .un => |un| { assert(un.ty != .none); assert(un.val != .none); - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .union_value, .data = try ip.addExtra(gpa, un), }); @@ -6479,7 +6548,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All for (memoized_call.arg_values) |arg| assert(arg != .none); try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(MemoizedCall).Struct.fields.len + memoized_call.arg_values.len); - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .memoized_call, .data = ip.addExtraAssumeCapacity(MemoizedCall{ .func = memoized_call.func, @@ -6490,7 +6559,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All ip.extra.appendSliceAssumeCapacity(@ptrCast(memoized_call.arg_values)); }, } - return gop.set(@enumFromInt(ip.items.len - 1)); + return gop.put(); } pub const UnionTypeInit = struct { @@ -6544,6 +6613,8 @@ pub fn getUnionType( defer gop.deinit(); if (gop == .existing) return .{ .existing = gop.existing }; + const items = ip.getLocal(tid).getMutableItems(gpa); + const align_elements_len = if (ini.flags.any_aligned_fields) (ini.fields_len + 3) / 4 else 0; const align_element: u32 = @bitCast([1]u8{@intFromEnum(Alignment.none)} ** 4); try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.TypeUnion).Struct.fields.len + @@ -6556,7 +6627,7 @@ pub fn getUnionType( // zig fmt: on ini.fields_len + // field types align_elements_len); - try ip.items.ensureUnusedCapacity(gpa, 1); + try 
items.ensureUnusedCapacity(1); const extra_index = ip.addExtraAssumeCapacity(Tag.TypeUnion{ .flags = .{ @@ -6582,7 +6653,7 @@ pub fn getUnionType( }, }); - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .type_union, .data = extra_index, }); @@ -6618,7 +6689,7 @@ pub fn getUnionType( } return .{ .wip = .{ - .index = gop.set(@enumFromInt(ip.items.len - 1)), + .index = gop.put(), .decl_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeUnion, "decl").?, .namespace_extra_index = if (ini.has_namespace) extra_index + std.meta.fieldIndex(Tag.TypeUnion, "namespace").? @@ -6640,8 +6711,8 @@ pub const WipNamespaceType = struct { } return wip.index; } - pub fn cancel(wip: WipNamespaceType, ip: *InternPool) void { - ip.remove(wip.index); + pub fn cancel(wip: WipNamespaceType, ip: *InternPool, tid: Zcu.PerThread.Id) void { + ip.remove(tid, wip.index); } pub const Result = union(enum) { @@ -6692,6 +6763,8 @@ pub fn getStructType( defer gop.deinit(); if (gop == .existing) return .{ .existing = gop.existing }; + const items = ip.getLocal(tid).getMutableItems(gpa); + const names_map = try ip.addMap(gpa, ini.fields_len); errdefer _ = ip.maps.pop(); @@ -6728,7 +6801,7 @@ pub fn getStructType( .is_reified = ini.key == .reified, }, }); - try ip.items.append(gpa, .{ + try items.append(.{ .tag = if (ini.any_default_inits) .type_struct_packed_inits else .type_struct_packed, .data = extra_index, }); @@ -6747,7 +6820,7 @@ pub fn getStructType( ip.extra.appendNTimesAssumeCapacity(@intFromEnum(Index.none), ini.fields_len); } return .{ .wip = .{ - .index = gop.set(@enumFromInt(ip.items.len - 1)), + .index = gop.put(), .decl_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeStructPacked, "decl").?, .namespace_extra_index = if (ini.has_namespace) extra_index + std.meta.fieldIndex(Tag.TypeStructPacked, "namespace").? @@ -6800,7 +6873,7 @@ pub fn getStructType( .is_reified = ini.key == .reified, }, }); - try ip.items.append(gpa, .{ + try items.append(.{ .tag = .type_struct, .data = extra_index, }); @@ -6836,7 +6909,7 @@ pub fn getStructType( } ip.extra.appendNTimesAssumeCapacity(std.math.maxInt(u32), ini.fields_len); return .{ .wip = .{ - .index = gop.set(@enumFromInt(ip.items.len - 1)), + .index = gop.put(), .decl_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeStruct, "decl").?, .namespace_extra_index = namespace_extra_index, } }; @@ -6859,6 +6932,8 @@ pub fn getAnonStructType( assert(ini.types.len == ini.values.len); for (ini.types) |elem| assert(elem != .none); + const items = ip.getLocal(tid).getMutableItems(gpa); + const prev_extra_len = ip.extra.items.len; const fields_len: u32 = @intCast(ini.types.len); @@ -6866,7 +6941,7 @@ pub fn getAnonStructType( gpa, @typeInfo(TypeStructAnon).Struct.fields.len + (fields_len * 3), ); - try ip.items.ensureUnusedCapacity(gpa, 1); + try items.ensureUnusedCapacity(1); const extra_index = ip.addExtraAssumeCapacity(TypeStructAnon{ .fields_len = fields_len, @@ -6888,11 +6963,11 @@ pub fn getAnonStructType( return gop.existing; } - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = if (ini.names.len == 0) .type_tuple_anon else .type_struct_anon, .data = extra_index, }); - return gop.set(@enumFromInt(ip.items.len - 1)); + return gop.put(); } /// This is equivalent to `Key.FuncType` but adjusted to have a slice for `param_types`. 
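In `getFuncDeclIes` and `getFuncInstanceIes` in the hunks below, four items (function, error union type, inferred error set, function type) refer to each other, so their indices are wrapped up front from the current list length, and `putAt(offset)` later publishes each map entry counting back from the newest item. A small sketch of that offset arithmetic (the helper and the starting length are hypothetical, for illustration only):

const std = @import("std");

// Hypothetical helper: putAt(offset) targets the item at items.len - 1 - offset.
fn putAtIndex(items_len: u32, offset: u32) u32 {
    return items_len - 1 - offset;
}

pub fn main() void {
    const base: u32 = 100; // assumed list length before the four appends
    const len = base + 4; // func_decl, type_error_union, type_inferred_error_set, type_function
    std.debug.assert(putAtIndex(len, 3) == base + 0); // func_index
    std.debug.assert(putAtIndex(len, 2) == base + 1); // error_union_type
    std.debug.assert(putAtIndex(len, 1) == base + 2); // error_set_type
    std.debug.assert(putAtIndex(len, 0) == base + 3); // func_ty
}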
@@ -6930,7 +7005,9 @@ pub fn getFuncType(
         @intFromBool(key.comptime_bits != 0) +
         @intFromBool(key.noalias_bits != 0) +
         params_len);
-    try ip.items.ensureUnusedCapacity(gpa, 1);
+
+    const items = ip.getLocal(tid).getMutableItems(gpa);
+    try items.ensureUnusedCapacity(1);
 
     const func_type_extra_index = ip.addExtraAssumeCapacity(Tag.TypeFunction{
         .params_len = params_len,
@@ -6962,11 +7039,11 @@ pub fn getFuncType(
         return gop.existing;
     }
 
-    ip.items.appendAssumeCapacity(.{
+    items.appendAssumeCapacity(.{
         .tag = .type_function,
         .data = func_type_extra_index,
     });
-    return gop.set(@enumFromInt(ip.items.len - 1));
+    return gop.put();
 }
 
 pub fn getExternFunc(
@@ -6981,12 +7058,13 @@ pub fn getExternFunc(
     const prev_extra_len = ip.extra.items.len;
     const extra_index = try ip.addExtra(gpa, @as(Tag.ExternFunc, key));
     errdefer ip.extra.items.len = prev_extra_len;
-    try ip.items.append(gpa, .{
+    const items = ip.getLocal(tid).getMutableItems(gpa);
+    try items.append(.{
         .tag = .extern_func,
         .data = extra_index,
     });
-    errdefer ip.items.len -= 1;
-    return gop.set(@enumFromInt(ip.items.len - 1));
+    errdefer items.lenPtr().* -= 1;
+    return gop.put();
 }
 
 pub const GetFuncDeclKey = struct {
@@ -7013,7 +7091,9 @@ pub fn getFuncDecl(
     const prev_extra_len = ip.extra.items.len;
     try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.FuncDecl).Struct.fields.len);
-    try ip.items.ensureUnusedCapacity(gpa, 1);
+
+    const items = ip.getLocal(tid).getMutableItems(gpa);
+    try items.ensureUnusedCapacity(1);
 
     const func_decl_extra_index = ip.addExtraAssumeCapacity(Tag.FuncDecl{
         .analysis = .{
@@ -7043,11 +7123,11 @@ pub fn getFuncDecl(
         return gop.existing;
     }
 
-    ip.items.appendAssumeCapacity(.{
+    items.appendAssumeCapacity(.{
         .tag = .func_decl,
         .data = func_decl_extra_index,
    });
-    return gop.set(@enumFromInt(ip.items.len - 1));
+    return gop.put();
 }
 
 pub const GetFuncDeclIesKey = struct {
@@ -7095,7 +7175,26 @@ pub fn getFuncDeclIes(
         @intFromBool(key.comptime_bits != 0) +
         @intFromBool(key.noalias_bits != 0) +
         params_len);
-    try ip.items.ensureUnusedCapacity(gpa, 4);
+
+    const items = ip.getLocal(tid).getMutableItems(gpa);
+    try items.ensureUnusedCapacity(4);
+
+    const func_index = Index.Unwrapped.wrap(.{
+        .tid = tid,
+        .index = items.lenPtr().* + 0,
+    }, ip);
+    const error_union_type = Index.Unwrapped.wrap(.{
+        .tid = tid,
+        .index = items.lenPtr().* + 1,
+    }, ip);
+    const error_set_type = Index.Unwrapped.wrap(.{
+        .tid = tid,
+        .index = items.lenPtr().* + 2,
+    }, ip);
+    const func_ty = Index.Unwrapped.wrap(.{
+        .tid = tid,
+        .index = items.lenPtr().* + 3,
+    }, ip);
 
     const func_decl_extra_index = ip.addExtraAssumeCapacity(Tag.FuncDecl{
         .analysis = .{
@@ -7107,36 +7206,18 @@ pub fn getFuncDeclIes(
             .inferred_error_set = true,
         },
         .owner_decl = key.owner_decl,
-        .ty = @enumFromInt(ip.items.len + 3),
+        .ty = func_ty,
         .zir_body_inst = key.zir_body_inst,
         .lbrace_line = key.lbrace_line,
         .rbrace_line = key.rbrace_line,
         .lbrace_column = key.lbrace_column,
         .rbrace_column = key.rbrace_column,
     });
-
-    ip.items.appendAssumeCapacity(.{
-        .tag = .func_decl,
-        .data = func_decl_extra_index,
-    });
     ip.extra.appendAssumeCapacity(@intFromEnum(Index.none));
 
-    ip.items.appendAssumeCapacity(.{
-        .tag = .type_error_union,
-        .data = ip.addExtraAssumeCapacity(Tag.ErrorUnionType{
-            .error_set_type = @enumFromInt(ip.items.len + 1),
-            .payload_type = key.bare_return_type,
-        }),
-    });
-
-    ip.items.appendAssumeCapacity(.{
-        .tag = .type_inferred_error_set,
-        .data = @intCast(ip.items.len - 2),
-    });
-
     const func_type_extra_index =
ip.addExtraAssumeCapacity(Tag.TypeFunction{ .params_len = params_len, - .return_type = @enumFromInt(ip.items.len - 2), + .return_type = error_union_type, .flags = .{ .cc = key.cc orelse .Unspecified, .is_var_args = key.is_var_args, @@ -7152,45 +7233,57 @@ pub fn getFuncDeclIes( if (key.comptime_bits != 0) ip.extra.appendAssumeCapacity(key.comptime_bits); if (key.noalias_bits != 0) ip.extra.appendAssumeCapacity(key.noalias_bits); ip.extra.appendSliceAssumeCapacity(@ptrCast(key.param_types)); + + items.appendSliceAssumeCapacity(.{ + .tag = &.{ + .func_decl, + .type_error_union, + .type_inferred_error_set, + .type_function, + }, + .data = &.{ + func_decl_extra_index, + ip.addExtraAssumeCapacity(Tag.ErrorUnionType{ + .error_set_type = error_set_type, + .payload_type = key.bare_return_type, + }), + @intFromEnum(func_index), + func_type_extra_index, + }, + }); errdefer { - ip.items.len -= 4; + items.lenPtr().* -= 4; ip.extra.items.len = prev_extra_len; } - ip.items.appendAssumeCapacity(.{ - .tag = .type_function, - .data = func_type_extra_index, - }); - - var gop = try ip.getOrPutKey(gpa, tid, .{ + var func_gop = try ip.getOrPutKey(gpa, tid, .{ .func = extraFuncDecl(ip, func_decl_extra_index), }); - defer gop.deinit(); - if (gop == .existing) { + defer func_gop.deinit(); + if (func_gop == .existing) { // An existing function type was found; undo the additions to our two arrays. - ip.items.len -= 4; + items.lenPtr().* -= 4; ip.extra.items.len = prev_extra_len; - return gop.existing; + return func_gop.existing; } - - var eu_gop = try ip.getOrPutKey(gpa, tid, .{ .error_union_type = .{ - .error_set_type = @enumFromInt(ip.items.len - 2), + var error_union_type_gop = try ip.getOrPutKey(gpa, tid, .{ .error_union_type = .{ + .error_set_type = error_set_type, .payload_type = key.bare_return_type, } }); - defer eu_gop.deinit(); - var ies_gop = try ip.getOrPutKey(gpa, tid, .{ - .inferred_error_set_type = @enumFromInt(ip.items.len - 4), + defer error_union_type_gop.deinit(); + var error_set_type_gop = try ip.getOrPutKey(gpa, tid, .{ + .inferred_error_set_type = func_index, }); - defer ies_gop.deinit(); - var ty_gop = try ip.getOrPutKey(gpa, tid, .{ + defer error_set_type_gop.deinit(); + var func_ty_gop = try ip.getOrPutKey(gpa, tid, .{ .func_type = extraFuncType(ip, func_type_extra_index), }); - defer ty_gop.deinit(); - const index = gop.set(@enumFromInt(ip.items.len - 4)); - _ = eu_gop.set(@enumFromInt(@intFromEnum(index) + 1)); - _ = ies_gop.set(@enumFromInt(@intFromEnum(index) + 2)); - _ = ty_gop.set(@enumFromInt(@intFromEnum(index) + 3)); - return index; + defer func_ty_gop.deinit(); + assert(func_gop.putAt(3) == func_index); + assert(error_union_type_gop.putAt(2) == error_union_type); + assert(error_set_type_gop.putAt(1) == error_set_type); + assert(func_ty_gop.putAt(0) == func_ty); + return func_index; } pub fn getErrorSetType( @@ -7227,11 +7320,12 @@ pub fn getErrorSetType( return gop.existing; } - try ip.items.append(gpa, .{ + const items = ip.getLocal(tid).getMutableItems(gpa); + try items.append(.{ .tag = .type_error_set, .data = error_set_extra_index, }); - errdefer ip.items.len -= 1; + errdefer items.lenPtr().* -= 1; const names_map = try ip.addMap(gpa, names.len); assert(names_map == predicted_names_map); @@ -7239,7 +7333,7 @@ pub fn getErrorSetType( addStringsToMap(ip, names_map, names); - return gop.set(@enumFromInt(ip.items.len - 1)); + return gop.put(); } pub const GetFuncInstanceKey = struct { @@ -7312,15 +7406,14 @@ pub fn getFuncInstance( return gop.existing; } - const func_index: Index 
= @enumFromInt(ip.items.len); - - try ip.items.append(gpa, .{ + const items = ip.getLocal(tid).getMutableItems(gpa); + const func_index = Index.Unwrapped.wrap(.{ .tid = tid, .index = items.lenPtr().* }, ip); + try items.append(.{ .tag = .func_instance, .data = func_extra_index, }); - errdefer ip.items.len -= 1; - - return gop.set(try finishFuncInstance( + errdefer items.lenPtr().* -= 1; + try finishFuncInstance( ip, gpa, tid, @@ -7329,7 +7422,8 @@ pub fn getFuncInstance( func_extra_index, arg.alignment, arg.section, - )); + ); + return gop.put(); } /// This function exists separately than `getFuncInstance` because it needs to @@ -7361,12 +7455,26 @@ pub fn getFuncInstanceIes( @typeInfo(Tag.TypeFunction).Struct.fields.len + @intFromBool(arg.noalias_bits != 0) + params_len); - try ip.items.ensureUnusedCapacity(gpa, 4); - const func_index: Index = @enumFromInt(ip.items.len); - const error_union_type: Index = @enumFromInt(ip.items.len + 1); - const error_set_type: Index = @enumFromInt(ip.items.len + 2); - const func_ty: Index = @enumFromInt(ip.items.len + 3); + const items = ip.getLocal(tid).getMutableItems(gpa); + try items.ensureUnusedCapacity(4); + + const func_index = Index.Unwrapped.wrap(.{ + .tid = tid, + .index = items.lenPtr().* + 0, + }, ip); + const error_union_type = Index.Unwrapped.wrap(.{ + .tid = tid, + .index = items.lenPtr().* + 1, + }, ip); + const error_set_type = Index.Unwrapped.wrap(.{ + .tid = tid, + .index = items.lenPtr().* + 2, + }, ip); + const func_ty = Index.Unwrapped.wrap(.{ + .tid = tid, + .index = items.lenPtr().* + 3, + }, ip); const func_extra_index = ip.addExtraAssumeCapacity(Tag.FuncInstance{ .analysis = .{ @@ -7406,57 +7514,52 @@ pub fn getFuncInstanceIes( if (arg.noalias_bits != 0) ip.extra.appendAssumeCapacity(arg.noalias_bits); ip.extra.appendSliceAssumeCapacity(@ptrCast(arg.param_types)); - // TODO: add appendSliceAssumeCapacity to MultiArrayList. - ip.items.appendAssumeCapacity(.{ - .tag = .func_instance, - .data = func_extra_index, - }); - ip.items.appendAssumeCapacity(.{ - .tag = .type_error_union, - .data = ip.addExtraAssumeCapacity(Tag.ErrorUnionType{ - .error_set_type = error_set_type, - .payload_type = arg.bare_return_type, - }), - }); - ip.items.appendAssumeCapacity(.{ - .tag = .type_inferred_error_set, - .data = @intFromEnum(func_index), - }); - ip.items.appendAssumeCapacity(.{ - .tag = .type_function, - .data = func_type_extra_index, + items.appendSliceAssumeCapacity(.{ + .tag = &.{ + .func_instance, + .type_error_union, + .type_inferred_error_set, + .type_function, + }, + .data = &.{ + func_extra_index, + ip.addExtraAssumeCapacity(Tag.ErrorUnionType{ + .error_set_type = error_set_type, + .payload_type = arg.bare_return_type, + }), + @intFromEnum(func_index), + func_type_extra_index, + }, }); errdefer { - ip.items.len -= 4; + items.lenPtr().* -= 4; ip.extra.items.len = prev_extra_len; } - var gop = try ip.getOrPutKey(gpa, tid, .{ + var func_gop = try ip.getOrPutKey(gpa, tid, .{ .func = extraFuncInstance(ip, func_extra_index), }); - defer gop.deinit(); - if (gop == .existing) { + defer func_gop.deinit(); + if (func_gop == .existing) { // Hot path: undo the additions to our two arrays. - ip.items.len -= 4; + items.lenPtr().* -= 4; ip.extra.items.len = prev_extra_len; - return gop.existing; + return func_gop.existing; } - - // Synchronize the map with items. 
- var eu_gop = try ip.getOrPutKey(gpa, tid, .{ .error_union_type = .{ + var error_union_type_gop = try ip.getOrPutKey(gpa, tid, .{ .error_union_type = .{ .error_set_type = error_set_type, .payload_type = arg.bare_return_type, } }); - defer eu_gop.deinit(); - var ies_gop = try ip.getOrPutKey(gpa, tid, .{ + defer error_union_type_gop.deinit(); + var error_set_type_gop = try ip.getOrPutKey(gpa, tid, .{ .inferred_error_set_type = func_index, }); - defer ies_gop.deinit(); - var ty_gop = try ip.getOrPutKey(gpa, tid, .{ + defer error_set_type_gop.deinit(); + var func_ty_gop = try ip.getOrPutKey(gpa, tid, .{ .func_type = extraFuncType(ip, func_type_extra_index), }); - defer ty_gop.deinit(); - const index = gop.set(try finishFuncInstance( + defer func_ty_gop.deinit(); + try finishFuncInstance( ip, gpa, tid, @@ -7465,11 +7568,12 @@ pub fn getFuncInstanceIes( func_extra_index, arg.alignment, arg.section, - )); - _ = eu_gop.set(@enumFromInt(@intFromEnum(index) + 1)); - _ = ies_gop.set(@enumFromInt(@intFromEnum(index) + 2)); - _ = ty_gop.set(@enumFromInt(@intFromEnum(index) + 3)); - return index; + ); + assert(func_gop.putAt(3) == func_index); + assert(error_union_type_gop.putAt(2) == error_union_type); + assert(error_set_type_gop.putAt(1) == error_set_type); + assert(func_ty_gop.putAt(0) == func_ty); + return func_index; } fn finishFuncInstance( @@ -7481,7 +7585,7 @@ fn finishFuncInstance( func_extra_index: u32, alignment: Alignment, section: OptionalNullTerminatedString, -) Allocator.Error!Index { +) Allocator.Error!void { const fn_owner_decl = ip.declPtr(ip.funcDeclOwner(generic_owner)); const decl_index = try ip.createDecl(gpa, .{ .name = undefined, @@ -7510,8 +7614,6 @@ fn finishFuncInstance( decl.name = try ip.getOrPutStringFmt(gpa, tid, "{}__anon_{d}", .{ fn_owner_decl.name.fmt(ip), @intFromEnum(decl_index), }, .no_embedded_nulls); - - return func_index; } pub const EnumTypeInit = struct { @@ -7589,8 +7691,8 @@ pub const WipEnumType = struct { return null; } - pub fn cancel(wip: WipEnumType, ip: *InternPool) void { - ip.remove(wip.index); + pub fn cancel(wip: WipEnumType, ip: *InternPool, tid: Zcu.PerThread.Id) void { + ip.remove(tid, wip.index); } pub const Result = union(enum) { @@ -7618,7 +7720,8 @@ pub fn getEnumType( defer gop.deinit(); if (gop == .existing) return .{ .existing = gop.existing }; - try ip.items.ensureUnusedCapacity(gpa, 1); + const items = ip.getLocal(tid).getMutableItems(gpa); + try items.ensureUnusedCapacity(1); const names_map = try ip.addMap(gpa, ini.fields_len); errdefer _ = ip.maps.pop(); @@ -7650,7 +7753,7 @@ pub fn getEnumType( inline else => |x| x.zir_index, }.toOptional(), }); - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .type_enum_auto, .data = extra_index, }); @@ -7661,7 +7764,7 @@ pub fn getEnumType( const names_start = ip.extra.items.len; ip.extra.appendNTimesAssumeCapacity(undefined, ini.fields_len); return .{ .wip = .{ - .index = gop.set(@enumFromInt(ip.items.len - 1)), + .index = gop.put(), .tag_ty_index = extra_index + std.meta.fieldIndex(EnumAuto, "int_tag_type").?, .decl_index = extra_index + std.meta.fieldIndex(EnumAuto, "decl").?, .namespace_index = if (ini.has_namespace) extra_index + std.meta.fieldIndex(EnumAuto, "namespace").? 
else null, @@ -7706,7 +7809,7 @@ pub fn getEnumType( inline else => |x| x.zir_index, }.toOptional(), }); - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = switch (ini.tag_mode) { .auto => unreachable, .explicit => .type_enum_explicit, @@ -7725,7 +7828,7 @@ pub fn getEnumType( ip.extra.appendNTimesAssumeCapacity(undefined, ini.fields_len); } return .{ .wip = .{ - .index = gop.set(@enumFromInt(ip.items.len - 1)), + .index = gop.put(), .tag_ty_index = extra_index + std.meta.fieldIndex(EnumAuto, "int_tag_type").?, .decl_index = extra_index + std.meta.fieldIndex(EnumAuto, "decl").?, .namespace_index = if (ini.has_namespace) extra_index + std.meta.fieldIndex(EnumAuto, "namespace").? else null, @@ -7760,7 +7863,8 @@ pub fn getGeneratedTagEnumType( assert(ip.isIntegerType(ini.tag_ty)); for (ini.values) |val| assert(ip.typeOf(val) == ini.tag_ty); - try ip.items.ensureUnusedCapacity(gpa, 1); + const items = ip.getLocal(tid).getMutableItems(gpa); + try items.ensureUnusedCapacity(1); const names_map = try ip.addMap(gpa, ini.names.len); errdefer _ = ip.maps.pop(); @@ -7774,7 +7878,7 @@ pub fn getGeneratedTagEnumType( try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(EnumAuto).Struct.fields.len + 1 + // owner_union fields_len); // field names - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .type_enum_auto, .data = ip.addExtraAssumeCapacity(EnumAuto{ .decl = ini.decl, @@ -7803,7 +7907,7 @@ pub fn getGeneratedTagEnumType( // We don't clean up the values map on error! errdefer @compileError("error path leaks values_map"); - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = switch (ini.tag_mode) { .explicit => .type_enum_explicit, .nonexhaustive => .type_enum_nonexhaustive, @@ -7835,7 +7939,7 @@ pub fn getGeneratedTagEnumType( .generated_tag = .{ .union_type = ini.owner_union_ty }, } }); defer gop.deinit(); - return gop.set(@enumFromInt(ip.items.len - 1)); + return gop.put(); } pub const OpaqueTypeInit = struct { @@ -7870,7 +7974,10 @@ pub fn getOpaqueType( } }); defer gop.deinit(); if (gop == .existing) return .{ .existing = gop.existing }; - try ip.items.ensureUnusedCapacity(gpa, 1); + + const items = ip.getLocal(tid).getMutableItems(gpa); + try items.ensureUnusedCapacity(1); + try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.TypeOpaque).Struct.fields.len + switch (ini.key) { .declared => |d| d.captures.len, .reified => 0, @@ -7886,7 +7993,7 @@ pub fn getOpaqueType( .reified => std.math.maxInt(u32), }, }); - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .type_opaque, .data = extra_index, }); @@ -7895,7 +8002,7 @@ pub fn getOpaqueType( .reified => {}, } return .{ .wip = .{ - .index = gop.set(@enumFromInt(ip.items.len - 1)), + .index = gop.put(), .decl_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeOpaque, "decl").?, .namespace_extra_index = if (ini.has_namespace) extra_index + std.meta.fieldIndex(Tag.TypeOpaque, "namespace").? @@ -7958,37 +8065,50 @@ fn addMap(ip: *InternPool, gpa: Allocator, cap: usize) Allocator.Error!MapIndex /// This operation only happens under compile error conditions. /// Leak the index until the next garbage collection. /// Invalidates all references to this index. 
-pub fn remove(ip: *InternPool, index: Index) void { +pub fn remove(ip: *InternPool, tid: Zcu.PerThread.Id, index: Index) void { + const unwrapped = index.unwrap(ip); if (@intFromEnum(index) < static_keys.len) { + if (tid != .main or unwrapped.tid != .main) @panic("This operation is impossible to be thread-safe"); // The item being removed replaced a special index via `InternPool.resolveBuiltinType`. // Restore the original item at this index. + var items = ip.getLocalShared(unwrapped.tid).items.view(); switch (static_keys[@intFromEnum(index)]) { - .simple_type => |s| { - ip.items.set(@intFromEnum(index), .{ - .tag = .simple_type, - .data = @intFromEnum(s), - }); - }, + .simple_type => |s| items.set(@intFromEnum(index), .{ + .tag = .simple_type, + .data = @intFromEnum(s), + }), else => unreachable, } return; } - if (@intFromEnum(index) == ip.items.len - 1) { - // Happy case - we can just drop the item without affecting any other indices. - ip.items.len -= 1; - } else { - // We must preserve the item so that indices following it remain valid. - // Thus, we will rewrite the tag to `removed`, leaking the item until - // next GC but causing `KeyAdapter` to ignore it. - ip.items.set(@intFromEnum(index), .{ .tag = .removed, .data = undefined }); + if (unwrapped.tid == tid) { + const items_len = &ip.getLocal(tid).mutate.items.len; + if (unwrapped.index == items_len.* - 1) { + // Happy case - we can just drop the item without affecting any other indices. + items_len.* -= 1; + return; + } } + + // We must preserve the item so that indices following it remain valid. + // Thus, we will rewrite the tag to `removed`, leaking the item until + // next GC but causing `KeyAdapter` to ignore it. + const items = ip.getLocalShared(unwrapped.tid).items.view(); + @atomicStore(Tag, &items.items(.tag)[unwrapped.index], .removed, .release); } -fn addInt(ip: *InternPool, gpa: Allocator, ty: Index, tag: Tag, limbs: []const Limb) !void { +fn addInt( + ip: *InternPool, + gpa: Allocator, + tid: Zcu.PerThread.Id, + ty: Index, + tag: Tag, + limbs: []const Limb, +) !void { const limbs_len: u32 = @intCast(limbs.len); try ip.reserveLimbs(gpa, @typeInfo(Int).Struct.fields.len + limbs_len); - ip.items.appendAssumeCapacity(.{ + ip.getLocal(tid).getMutableItems(gpa).appendAssumeCapacity(.{ .tag = tag, .data = ip.addLimbsExtraAssumeCapacity(Int{ .ty = ty, @@ -8235,13 +8355,13 @@ pub fn childType(ip: *const InternPool, i: Index) Index { } /// Given a slice type, returns the type of the ptr field. -pub fn slicePtrType(ip: *const InternPool, i: Index) Index { - switch (i) { +pub fn slicePtrType(ip: *const InternPool, index: Index) Index { + switch (index) { .slice_const_u8_type => return .manyptr_const_u8_type, .slice_const_u8_sentinel_0_type => return .manyptr_const_u8_sentinel_0_type, else => {}, } - const item = ip.items.get(@intFromEnum(i)); + const item = index.getItem(ip); switch (item.tag) { .type_slice => return @enumFromInt(item.data), else => unreachable, // not a slice type @@ -8249,8 +8369,8 @@ pub fn slicePtrType(ip: *const InternPool, i: Index) Index { } /// Given a slice value, returns the value of the ptr field. 
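
The rewritten `remove` above folds three distinct cases into one function, which is easy to lose in the diff noise. A condensed restatement, keeping only the calls that appear in the hunk (the surrounding declarations are elided, so this is a sketch, not the literal implementation):

    fn removeCondensed(ip: *InternPool, tid: Zcu.PerThread.Id, index: Index) void {
        const unwrapped = index.unwrap(ip);
        if (@intFromEnum(index) < static_keys.len) {
            // Case 1: a builtin slot overwritten by resolveBuiltinType gets its
            // original static item restored; only legal on the main thread.
        } else if (unwrapped.tid == tid and
            unwrapped.index == ip.getLocal(tid).mutate.items.len - 1)
        {
            // Case 2: the owning thread may pop its own newest item outright,
            // since no other index can point past its own list tail.
            ip.getLocal(tid).mutate.items.len -= 1;
        } else {
            // Case 3: foreign or interior items are tombstoned, never popped,
            // so existing indices stay valid. The tag is retagged atomically
            // and KeyAdapter skips the slot until the next garbage collection.
            const items = ip.getLocalShared(unwrapped.tid).items.view();
            @atomicStore(Tag, &items.items(.tag)[unwrapped.index], .removed, .release);
        }
    }
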
-pub fn slicePtr(ip: *const InternPool, i: Index) Index { - const item = ip.items.get(@intFromEnum(i)); +pub fn slicePtr(ip: *const InternPool, index: Index) Index { + const item = index.getItem(ip); switch (item.tag) { .ptr_slice => return ip.extraData(PtrSlice, item.data).ptr, else => unreachable, // not a slice value @@ -8258,8 +8378,8 @@ pub fn slicePtr(ip: *const InternPool, i: Index) Index { } /// Given a slice value, returns the value of the len field. -pub fn sliceLen(ip: *const InternPool, i: Index) Index { - const item = ip.items.get(@intFromEnum(i)); +pub fn sliceLen(ip: *const InternPool, index: Index) Index { + const item = index.getItem(ip); switch (item.tag) { .ptr_slice => return ip.extraData(PtrSlice, item.data).len, else => unreachable, // not a slice value @@ -8296,8 +8416,6 @@ pub fn getCoerced( const old_ty = ip.typeOf(val); if (old_ty == new_ty) return val; - const tags = ip.items.items(.tag); - switch (val) { .undef => return ip.get(gpa, tid, .{ .undef = new_ty }), .null_value => { @@ -8323,15 +8441,14 @@ pub fn getCoerced( } }), }; }, - else => switch (tags[@intFromEnum(val)]) { + else => switch (val.getTag(ip)) { .func_decl => return getCoercedFuncDecl(ip, gpa, tid, val, new_ty), .func_instance => return getCoercedFuncInstance(ip, gpa, tid, val, new_ty), .func_coerced => { - const extra_index = ip.items.items(.data)[@intFromEnum(val)]; const func: Index = @enumFromInt( - ip.extra.items[extra_index + std.meta.fieldIndex(Tag.FuncCoerced, "func").?], + ip.extra.items[val.getData(ip) + std.meta.fieldIndex(Tag.FuncCoerced, "func").?], ); - switch (tags[@intFromEnum(func)]) { + switch (func.getTag(ip)) { .func_decl => return getCoercedFuncDecl(ip, gpa, tid, val, new_ty), .func_instance => return getCoercedFuncInstance(ip, gpa, tid, val, new_ty), else => unreachable, @@ -8575,10 +8692,8 @@ fn getCoercedFuncDecl( val: Index, new_ty: Index, ) Allocator.Error!Index { - const datas = ip.items.items(.data); - const extra_index = datas[@intFromEnum(val)]; const prev_ty: Index = @enumFromInt( - ip.extra.items[extra_index + std.meta.fieldIndex(Tag.FuncDecl, "ty").?], + ip.extra.items[val.getData(ip) + std.meta.fieldIndex(Tag.FuncDecl, "ty").?], ); if (new_ty == prev_ty) return val; return getCoercedFunc(ip, gpa, tid, val, new_ty); @@ -8591,10 +8706,8 @@ fn getCoercedFuncInstance( val: Index, new_ty: Index, ) Allocator.Error!Index { - const datas = ip.items.items(.data); - const extra_index = datas[@intFromEnum(val)]; const prev_ty: Index = @enumFromInt( - ip.extra.items[extra_index + std.meta.fieldIndex(Tag.FuncInstance, "ty").?], + ip.extra.items[val.getData(ip) + std.meta.fieldIndex(Tag.FuncInstance, "ty").?], ); if (new_ty == prev_ty) return val; return getCoercedFunc(ip, gpa, tid, val, new_ty); @@ -8609,7 +8722,9 @@ fn getCoercedFunc( ) Allocator.Error!Index { const prev_extra_len = ip.extra.items.len; try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.FuncCoerced).Struct.fields.len); - try ip.items.ensureUnusedCapacity(gpa, 1); + + const items = ip.getLocal(tid).getMutableItems(gpa); + try items.ensureUnusedCapacity(1); const extra_index = ip.addExtraAssumeCapacity(Tag.FuncCoerced{ .ty = ty, @@ -8626,11 +8741,11 @@ fn getCoercedFunc( return gop.existing; } - ip.items.appendAssumeCapacity(.{ + items.appendAssumeCapacity(.{ .tag = .func_coerced, .data = extra_index, }); - return gop.set(@enumFromInt(ip.items.len - 1)); + return gop.put(); } /// Asserts `val` has an integer type. 
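
From here on, nearly every hunk replaces direct `ip.items.items(.tag)[...]` and `ip.items.get(...)` reads with `index.getItem(ip)`, `index.getTag(ip)`, and `index.getData(ip)`. Their definitions fall outside this part of the diff, so the following is a hypothetical reconstruction from the call sites: an `Index` now encodes an owning thread plus a local offset, and reads resolve through that thread's shared item list (the `view().get` call is assumed, not shown here):

    pub const Index = enum(u32) {
        // ... declared on Index, so call sites read `index.getItem(ip)`.
        pub fn getItem(index: Index, ip: *const InternPool) Item {
            const unwrapped = index.unwrap(ip);
            return ip.getLocalShared(unwrapped.tid).items.view().get(unwrapped.index);
        }
        pub fn getTag(index: Index, ip: *const InternPool) Tag {
            return index.getItem(ip).tag;
        }
        pub fn getData(index: Index, ip: *const InternPool) u32 {
            return index.getItem(ip).data;
        }
    };
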
@@ -8661,11 +8776,9 @@ pub fn getCoercedInts(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, in } pub fn indexToFuncType(ip: *const InternPool, val: Index) ?Key.FuncType { - assert(val != .none); - const tags = ip.items.items(.tag); - const datas = ip.items.items(.data); - switch (tags[@intFromEnum(val)]) { - .type_function => return extraFuncType(ip, datas[@intFromEnum(val)]), + const item = val.getItem(ip); + switch (item.tag) { + .type_function => return extraFuncType(ip, item.data), else => return null, } } @@ -8686,7 +8799,7 @@ pub fn isIntegerType(ip: *const InternPool, ty: Index) bool { .c_ulonglong_type, .comptime_int_type, => true, - else => switch (ip.items.items(.tag)[@intFromEnum(ty)]) { + else => switch (ty.getTag(ip)) { .type_int_signed, .type_int_unsigned, => true, @@ -8762,7 +8875,7 @@ pub fn errorUnionPayload(ip: *const InternPool, ty: Index) Index { /// The is only legal because the initializer is not part of the hash. pub fn mutateVarInit(ip: *InternPool, index: Index, init_index: Index) void { - const item = ip.items.get(@intFromEnum(index)); + const item = index.getItem(ip); assert(item.tag == .variable); ip.extra.items[item.data + std.meta.fieldIndex(Tag.Variable, "init").?] = @intFromEnum(init_index); } @@ -8773,7 +8886,11 @@ pub fn dump(ip: *const InternPool) void { } fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { - const items_size = (1 + 4) * ip.items.len; + var items_len: usize = 0; + for (ip.locals) |*local| { + items_len += local.mutate.items.len; + } + const items_size = (1 + 4) * items_len; const extra_size = 4 * ip.extra.items.len; const limbs_size = 8 * ip.limbs.items.len; const decls_size = ip.allocated_decls.len * @sizeOf(Module.Decl); @@ -8790,7 +8907,7 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { \\ , .{ total_size, - ip.items.len, + items_len, items_size, ip.extra.items.len, extra_size, @@ -8800,217 +8917,221 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { decls_size, }); - const tags = ip.items.items(.tag); - const datas = ip.items.items(.data); const TagStats = struct { count: usize = 0, bytes: usize = 0, }; var counts = std.AutoArrayHashMap(Tag, TagStats).init(arena); - for (tags, datas) |tag, data| { - const gop = try counts.getOrPut(tag); - if (!gop.found_existing) gop.value_ptr.* = .{}; - gop.value_ptr.count += 1; - gop.value_ptr.bytes += 1 + 4 + @as(usize, switch (tag) { - // Note that in this case, we have technically leaked some extra data - // bytes which we do not account for here. 
- .removed => 0, - - .type_int_signed => 0, - .type_int_unsigned => 0, - .type_array_small => @sizeOf(Vector), - .type_array_big => @sizeOf(Array), - .type_vector => @sizeOf(Vector), - .type_pointer => @sizeOf(Tag.TypePointer), - .type_slice => 0, - .type_optional => 0, - .type_anyframe => 0, - .type_error_union => @sizeOf(Key.ErrorUnionType), - .type_anyerror_union => 0, - .type_error_set => b: { - const info = ip.extraData(Tag.ErrorSet, data); - break :b @sizeOf(Tag.ErrorSet) + (@sizeOf(u32) * info.names_len); - }, - .type_inferred_error_set => 0, - .type_enum_explicit, .type_enum_nonexhaustive => b: { - const info = ip.extraData(EnumExplicit, data); - var ints = @typeInfo(EnumExplicit).Struct.fields.len; - if (info.zir_index == .none) ints += 1; - ints += if (info.captures_len != std.math.maxInt(u32)) - info.captures_len - else - @typeInfo(PackedU64).Struct.fields.len; - ints += info.fields_len; - if (info.values_map != .none) ints += info.fields_len; - break :b @sizeOf(u32) * ints; - }, - .type_enum_auto => b: { - const info = ip.extraData(EnumAuto, data); - const ints = @typeInfo(EnumAuto).Struct.fields.len + info.captures_len + info.fields_len; - break :b @sizeOf(u32) * ints; - }, - .type_opaque => b: { - const info = ip.extraData(Tag.TypeOpaque, data); - const ints = @typeInfo(Tag.TypeOpaque).Struct.fields.len + info.captures_len; - break :b @sizeOf(u32) * ints; - }, - .type_struct => b: { - if (data == 0) break :b 0; - const extra = ip.extraDataTrail(Tag.TypeStruct, data); - const info = extra.data; - var ints: usize = @typeInfo(Tag.TypeStruct).Struct.fields.len; - if (info.flags.any_captures) { - const captures_len = ip.extra.items[extra.end]; - ints += 1 + captures_len; - } - ints += info.fields_len; // types - if (!info.flags.is_tuple) { - ints += 1; // names_map - ints += info.fields_len; // names - } - if (info.flags.any_default_inits) - ints += info.fields_len; // inits - ints += @intFromBool(info.flags.has_namespace); // namespace - if (info.flags.any_aligned_fields) - ints += (info.fields_len + 3) / 4; // aligns - if (info.flags.any_comptime_fields) - ints += (info.fields_len + 31) / 32; // comptime bits - if (!info.flags.is_extern) - ints += info.fields_len; // runtime order - ints += info.fields_len; // offsets - break :b @sizeOf(u32) * ints; - }, - .type_struct_anon => b: { - const info = ip.extraData(TypeStructAnon, data); - break :b @sizeOf(TypeStructAnon) + (@sizeOf(u32) * 3 * info.fields_len); - }, - .type_struct_packed => b: { - const extra = ip.extraDataTrail(Tag.TypeStructPacked, data); - const captures_len = if (extra.data.flags.any_captures) - ip.extra.items[extra.end] - else - 0; - break :b @sizeOf(u32) * (@typeInfo(Tag.TypeStructPacked).Struct.fields.len + - @intFromBool(extra.data.flags.any_captures) + captures_len + - extra.data.fields_len * 2); - }, - .type_struct_packed_inits => b: { - const extra = ip.extraDataTrail(Tag.TypeStructPacked, data); - const captures_len = if (extra.data.flags.any_captures) - ip.extra.items[extra.end] - else - 0; - break :b @sizeOf(u32) * (@typeInfo(Tag.TypeStructPacked).Struct.fields.len + - @intFromBool(extra.data.flags.any_captures) + captures_len + - extra.data.fields_len * 3); - }, - .type_tuple_anon => b: { - const info = ip.extraData(TypeStructAnon, data); - break :b @sizeOf(TypeStructAnon) + (@sizeOf(u32) * 2 * info.fields_len); - }, - - .type_union => b: { - const extra = ip.extraDataTrail(Tag.TypeUnion, data); - const captures_len = if (extra.data.flags.any_captures) - ip.extra.items[extra.end] - else - 0; - const 
per_field = @sizeOf(u32); // field type - // 1 byte per field for alignment, rounded up to the nearest 4 bytes - const alignments = if (extra.data.flags.any_aligned_fields) - ((extra.data.fields_len + 3) / 4) * 4 - else - 0; - break :b @sizeOf(Tag.TypeUnion) + - 4 * (@intFromBool(extra.data.flags.any_captures) + captures_len) + - (extra.data.fields_len * per_field) + alignments; - }, + for (ip.locals) |*local| { + const items = local.shared.items.view(); + for ( + items.items(.tag)[0..local.mutate.items.len], + items.items(.data)[0..local.mutate.items.len], + ) |tag, data| { + const gop = try counts.getOrPut(tag); + if (!gop.found_existing) gop.value_ptr.* = .{}; + gop.value_ptr.count += 1; + gop.value_ptr.bytes += 1 + 4 + @as(usize, switch (tag) { + // Note that in this case, we have technically leaked some extra data + // bytes which we do not account for here. + .removed => 0, + + .type_int_signed => 0, + .type_int_unsigned => 0, + .type_array_small => @sizeOf(Vector), + .type_array_big => @sizeOf(Array), + .type_vector => @sizeOf(Vector), + .type_pointer => @sizeOf(Tag.TypePointer), + .type_slice => 0, + .type_optional => 0, + .type_anyframe => 0, + .type_error_union => @sizeOf(Key.ErrorUnionType), + .type_anyerror_union => 0, + .type_error_set => b: { + const info = ip.extraData(Tag.ErrorSet, data); + break :b @sizeOf(Tag.ErrorSet) + (@sizeOf(u32) * info.names_len); + }, + .type_inferred_error_set => 0, + .type_enum_explicit, .type_enum_nonexhaustive => b: { + const info = ip.extraData(EnumExplicit, data); + var ints = @typeInfo(EnumExplicit).Struct.fields.len; + if (info.zir_index == .none) ints += 1; + ints += if (info.captures_len != std.math.maxInt(u32)) + info.captures_len + else + @typeInfo(PackedU64).Struct.fields.len; + ints += info.fields_len; + if (info.values_map != .none) ints += info.fields_len; + break :b @sizeOf(u32) * ints; + }, + .type_enum_auto => b: { + const info = ip.extraData(EnumAuto, data); + const ints = @typeInfo(EnumAuto).Struct.fields.len + info.captures_len + info.fields_len; + break :b @sizeOf(u32) * ints; + }, + .type_opaque => b: { + const info = ip.extraData(Tag.TypeOpaque, data); + const ints = @typeInfo(Tag.TypeOpaque).Struct.fields.len + info.captures_len; + break :b @sizeOf(u32) * ints; + }, + .type_struct => b: { + if (data == 0) break :b 0; + const extra = ip.extraDataTrail(Tag.TypeStruct, data); + const info = extra.data; + var ints: usize = @typeInfo(Tag.TypeStruct).Struct.fields.len; + if (info.flags.any_captures) { + const captures_len = ip.extra.items[extra.end]; + ints += 1 + captures_len; + } + ints += info.fields_len; // types + if (!info.flags.is_tuple) { + ints += 1; // names_map + ints += info.fields_len; // names + } + if (info.flags.any_default_inits) + ints += info.fields_len; // inits + ints += @intFromBool(info.flags.has_namespace); // namespace + if (info.flags.any_aligned_fields) + ints += (info.fields_len + 3) / 4; // aligns + if (info.flags.any_comptime_fields) + ints += (info.fields_len + 31) / 32; // comptime bits + if (!info.flags.is_extern) + ints += info.fields_len; // runtime order + ints += info.fields_len; // offsets + break :b @sizeOf(u32) * ints; + }, + .type_struct_anon => b: { + const info = ip.extraData(TypeStructAnon, data); + break :b @sizeOf(TypeStructAnon) + (@sizeOf(u32) * 3 * info.fields_len); + }, + .type_struct_packed => b: { + const extra = ip.extraDataTrail(Tag.TypeStructPacked, data); + const captures_len = if (extra.data.flags.any_captures) + ip.extra.items[extra.end] + else + 0; + break :b 
@sizeOf(u32) * (@typeInfo(Tag.TypeStructPacked).Struct.fields.len + + @intFromBool(extra.data.flags.any_captures) + captures_len + + extra.data.fields_len * 2); + }, + .type_struct_packed_inits => b: { + const extra = ip.extraDataTrail(Tag.TypeStructPacked, data); + const captures_len = if (extra.data.flags.any_captures) + ip.extra.items[extra.end] + else + 0; + break :b @sizeOf(u32) * (@typeInfo(Tag.TypeStructPacked).Struct.fields.len + + @intFromBool(extra.data.flags.any_captures) + captures_len + + extra.data.fields_len * 3); + }, + .type_tuple_anon => b: { + const info = ip.extraData(TypeStructAnon, data); + break :b @sizeOf(TypeStructAnon) + (@sizeOf(u32) * 2 * info.fields_len); + }, - .type_function => b: { - const info = ip.extraData(Tag.TypeFunction, data); - break :b @sizeOf(Tag.TypeFunction) + - (@sizeOf(Index) * info.params_len) + - (@as(u32, 4) * @intFromBool(info.flags.has_comptime_bits)) + - (@as(u32, 4) * @intFromBool(info.flags.has_noalias_bits)); - }, + .type_union => b: { + const extra = ip.extraDataTrail(Tag.TypeUnion, data); + const captures_len = if (extra.data.flags.any_captures) + ip.extra.items[extra.end] + else + 0; + const per_field = @sizeOf(u32); // field type + // 1 byte per field for alignment, rounded up to the nearest 4 bytes + const alignments = if (extra.data.flags.any_aligned_fields) + ((extra.data.fields_len + 3) / 4) * 4 + else + 0; + break :b @sizeOf(Tag.TypeUnion) + + 4 * (@intFromBool(extra.data.flags.any_captures) + captures_len) + + (extra.data.fields_len * per_field) + alignments; + }, - .undef => 0, - .simple_type => 0, - .simple_value => 0, - .ptr_decl => @sizeOf(PtrDecl), - .ptr_comptime_alloc => @sizeOf(PtrComptimeAlloc), - .ptr_anon_decl => @sizeOf(PtrAnonDecl), - .ptr_anon_decl_aligned => @sizeOf(PtrAnonDeclAligned), - .ptr_comptime_field => @sizeOf(PtrComptimeField), - .ptr_int => @sizeOf(PtrInt), - .ptr_eu_payload => @sizeOf(PtrBase), - .ptr_opt_payload => @sizeOf(PtrBase), - .ptr_elem => @sizeOf(PtrBaseIndex), - .ptr_field => @sizeOf(PtrBaseIndex), - .ptr_slice => @sizeOf(PtrSlice), - .opt_null => 0, - .opt_payload => @sizeOf(Tag.TypeValue), - .int_u8 => 0, - .int_u16 => 0, - .int_u32 => 0, - .int_i32 => 0, - .int_usize => 0, - .int_comptime_int_u32 => 0, - .int_comptime_int_i32 => 0, - .int_small => @sizeOf(IntSmall), + .type_function => b: { + const info = ip.extraData(Tag.TypeFunction, data); + break :b @sizeOf(Tag.TypeFunction) + + (@sizeOf(Index) * info.params_len) + + (@as(u32, 4) * @intFromBool(info.flags.has_comptime_bits)) + + (@as(u32, 4) * @intFromBool(info.flags.has_noalias_bits)); + }, - .int_positive, - .int_negative, - => b: { - const int = ip.limbData(Int, data); - break :b @sizeOf(Int) + int.limbs_len * 8; - }, + .undef => 0, + .simple_type => 0, + .simple_value => 0, + .ptr_decl => @sizeOf(PtrDecl), + .ptr_comptime_alloc => @sizeOf(PtrComptimeAlloc), + .ptr_anon_decl => @sizeOf(PtrAnonDecl), + .ptr_anon_decl_aligned => @sizeOf(PtrAnonDeclAligned), + .ptr_comptime_field => @sizeOf(PtrComptimeField), + .ptr_int => @sizeOf(PtrInt), + .ptr_eu_payload => @sizeOf(PtrBase), + .ptr_opt_payload => @sizeOf(PtrBase), + .ptr_elem => @sizeOf(PtrBaseIndex), + .ptr_field => @sizeOf(PtrBaseIndex), + .ptr_slice => @sizeOf(PtrSlice), + .opt_null => 0, + .opt_payload => @sizeOf(Tag.TypeValue), + .int_u8 => 0, + .int_u16 => 0, + .int_u32 => 0, + .int_i32 => 0, + .int_usize => 0, + .int_comptime_int_u32 => 0, + .int_comptime_int_i32 => 0, + .int_small => @sizeOf(IntSmall), + + .int_positive, + .int_negative, + => b: { + const int = 
ip.limbData(Int, data); + break :b @sizeOf(Int) + int.limbs_len * 8; + }, - .int_lazy_align, .int_lazy_size => @sizeOf(IntLazy), + .int_lazy_align, .int_lazy_size => @sizeOf(IntLazy), - .error_set_error, .error_union_error => @sizeOf(Key.Error), - .error_union_payload => @sizeOf(Tag.TypeValue), - .enum_literal => 0, - .enum_tag => @sizeOf(Tag.EnumTag), + .error_set_error, .error_union_error => @sizeOf(Key.Error), + .error_union_payload => @sizeOf(Tag.TypeValue), + .enum_literal => 0, + .enum_tag => @sizeOf(Tag.EnumTag), - .bytes => b: { - const info = ip.extraData(Bytes, data); - const len: usize = @intCast(ip.aggregateTypeLenIncludingSentinel(info.ty)); - break :b @sizeOf(Bytes) + len + @intFromBool(info.bytes.at(len - 1, ip) != 0); - }, - .aggregate => b: { - const info = ip.extraData(Tag.Aggregate, data); - const fields_len: u32 = @intCast(ip.aggregateTypeLenIncludingSentinel(info.ty)); - break :b @sizeOf(Tag.Aggregate) + (@sizeOf(Index) * fields_len); - }, - .repeated => @sizeOf(Repeated), - - .float_f16 => 0, - .float_f32 => 0, - .float_f64 => @sizeOf(Float64), - .float_f80 => @sizeOf(Float80), - .float_f128 => @sizeOf(Float128), - .float_c_longdouble_f80 => @sizeOf(Float80), - .float_c_longdouble_f128 => @sizeOf(Float128), - .float_comptime_float => @sizeOf(Float128), - .variable => @sizeOf(Tag.Variable), - .extern_func => @sizeOf(Tag.ExternFunc), - .func_decl => @sizeOf(Tag.FuncDecl), - .func_instance => b: { - const info = ip.extraData(Tag.FuncInstance, data); - const ty = ip.typeOf(info.generic_owner); - const params_len = ip.indexToKey(ty).func_type.param_types.len; - break :b @sizeOf(Tag.FuncInstance) + @sizeOf(Index) * params_len; - }, - .func_coerced => @sizeOf(Tag.FuncCoerced), - .only_possible_value => 0, - .union_value => @sizeOf(Key.Union), + .bytes => b: { + const info = ip.extraData(Bytes, data); + const len: usize = @intCast(ip.aggregateTypeLenIncludingSentinel(info.ty)); + break :b @sizeOf(Bytes) + len + @intFromBool(info.bytes.at(len - 1, ip) != 0); + }, + .aggregate => b: { + const info = ip.extraData(Tag.Aggregate, data); + const fields_len: u32 = @intCast(ip.aggregateTypeLenIncludingSentinel(info.ty)); + break :b @sizeOf(Tag.Aggregate) + (@sizeOf(Index) * fields_len); + }, + .repeated => @sizeOf(Repeated), + + .float_f16 => 0, + .float_f32 => 0, + .float_f64 => @sizeOf(Float64), + .float_f80 => @sizeOf(Float80), + .float_f128 => @sizeOf(Float128), + .float_c_longdouble_f80 => @sizeOf(Float80), + .float_c_longdouble_f128 => @sizeOf(Float128), + .float_comptime_float => @sizeOf(Float128), + .variable => @sizeOf(Tag.Variable), + .extern_func => @sizeOf(Tag.ExternFunc), + .func_decl => @sizeOf(Tag.FuncDecl), + .func_instance => b: { + const info = ip.extraData(Tag.FuncInstance, data); + const ty = ip.typeOf(info.generic_owner); + const params_len = ip.indexToKey(ty).func_type.param_types.len; + break :b @sizeOf(Tag.FuncInstance) + @sizeOf(Index) * params_len; + }, + .func_coerced => @sizeOf(Tag.FuncCoerced), + .only_possible_value => 0, + .union_value => @sizeOf(Key.Union), - .memoized_call => b: { - const info = ip.extraData(MemoizedCall, data); - break :b @sizeOf(MemoizedCall) + (@sizeOf(Index) * info.args_len); - }, - }); + .memoized_call => b: { + const info = ip.extraData(MemoizedCall, data); + break :b @sizeOf(MemoizedCall) + (@sizeOf(Index) * info.args_len); + }, + }); + } } const SortContext = struct { map: *std.AutoArrayHashMap(Tag, TagStats), @@ -9031,97 +9152,103 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { } fn 
dumpAllFallible(ip: *const InternPool) anyerror!void { - const tags = ip.items.items(.tag); - const datas = ip.items.items(.data); var bw = std.io.bufferedWriter(std.io.getStdErr().writer()); const w = bw.writer(); - for (tags, datas, 0..) |tag, data, i| { - try w.print("${d} = {s}(", .{ i, @tagName(tag) }); - switch (tag) { - .removed => {}, - - .simple_type => try w.print("{s}", .{@tagName(@as(SimpleType, @enumFromInt(data)))}), - .simple_value => try w.print("{s}", .{@tagName(@as(SimpleValue, @enumFromInt(data)))}), - - .type_int_signed, - .type_int_unsigned, - .type_array_small, - .type_array_big, - .type_vector, - .type_pointer, - .type_optional, - .type_anyframe, - .type_error_union, - .type_anyerror_union, - .type_error_set, - .type_inferred_error_set, - .type_enum_explicit, - .type_enum_nonexhaustive, - .type_enum_auto, - .type_opaque, - .type_struct, - .type_struct_anon, - .type_struct_packed, - .type_struct_packed_inits, - .type_tuple_anon, - .type_union, - .type_function, - .undef, - .ptr_decl, - .ptr_comptime_alloc, - .ptr_anon_decl, - .ptr_anon_decl_aligned, - .ptr_comptime_field, - .ptr_int, - .ptr_eu_payload, - .ptr_opt_payload, - .ptr_elem, - .ptr_field, - .ptr_slice, - .opt_payload, - .int_u8, - .int_u16, - .int_u32, - .int_i32, - .int_usize, - .int_comptime_int_u32, - .int_comptime_int_i32, - .int_small, - .int_positive, - .int_negative, - .int_lazy_align, - .int_lazy_size, - .error_set_error, - .error_union_error, - .error_union_payload, - .enum_literal, - .enum_tag, - .bytes, - .aggregate, - .repeated, - .float_f16, - .float_f32, - .float_f64, - .float_f80, - .float_f128, - .float_c_longdouble_f80, - .float_c_longdouble_f128, - .float_comptime_float, - .variable, - .extern_func, - .func_decl, - .func_instance, - .func_coerced, - .union_value, - .memoized_call, - => try w.print("{d}", .{data}), - - .opt_null, - .type_slice, - .only_possible_value, - => try w.print("${d}", .{data}), + for (ip.locals, 0..) 
|*local, tid| { + const items = local.shared.items.view(); + for ( + items.items(.tag)[0..local.mutate.items.len], + items.items(.data)[0..local.mutate.items.len], + 0.., + ) |tag, data, index| { + const i = Index.Unwrapped.wrap(.{ .tid = @enumFromInt(tid), .index = @intCast(index) }, ip); + try w.print("${d} = {s}(", .{ i, @tagName(tag) }); + switch (tag) { + .removed => {}, + + .simple_type => try w.print("{s}", .{@tagName(@as(SimpleType, @enumFromInt(data)))}), + .simple_value => try w.print("{s}", .{@tagName(@as(SimpleValue, @enumFromInt(data)))}), + + .type_int_signed, + .type_int_unsigned, + .type_array_small, + .type_array_big, + .type_vector, + .type_pointer, + .type_optional, + .type_anyframe, + .type_error_union, + .type_anyerror_union, + .type_error_set, + .type_inferred_error_set, + .type_enum_explicit, + .type_enum_nonexhaustive, + .type_enum_auto, + .type_opaque, + .type_struct, + .type_struct_anon, + .type_struct_packed, + .type_struct_packed_inits, + .type_tuple_anon, + .type_union, + .type_function, + .undef, + .ptr_decl, + .ptr_comptime_alloc, + .ptr_anon_decl, + .ptr_anon_decl_aligned, + .ptr_comptime_field, + .ptr_int, + .ptr_eu_payload, + .ptr_opt_payload, + .ptr_elem, + .ptr_field, + .ptr_slice, + .opt_payload, + .int_u8, + .int_u16, + .int_u32, + .int_i32, + .int_usize, + .int_comptime_int_u32, + .int_comptime_int_i32, + .int_small, + .int_positive, + .int_negative, + .int_lazy_align, + .int_lazy_size, + .error_set_error, + .error_union_error, + .error_union_payload, + .enum_literal, + .enum_tag, + .bytes, + .aggregate, + .repeated, + .float_f16, + .float_f32, + .float_f64, + .float_f80, + .float_f128, + .float_c_longdouble_f80, + .float_c_longdouble_f128, + .float_comptime_float, + .variable, + .extern_func, + .func_decl, + .func_instance, + .func_coerced, + .union_value, + .memoized_call, + => try w.print("{d}", .{data}), + + .opt_null, + .type_slice, + .only_possible_value, + => try w.print("${d}", .{data}), + } + try w.writeAll(")\n"); } - try w.writeAll(")\n"); } try bw.flush(); } @@ -9139,15 +9266,24 @@ pub fn dumpGenericInstancesFallible(ip: *const InternPool, allocator: Allocator) const w = bw.writer(); var instances: std.AutoArrayHashMapUnmanaged(Index, std.ArrayListUnmanaged(Index)) = .{}; - const datas = ip.items.items(.data); - for (ip.items.items(.tag), 0..) |tag, i| { - if (tag != .func_instance) continue; - const info = ip.extraData(Tag.FuncInstance, datas[i]); - - const gop = try instances.getOrPut(arena, info.generic_owner); - if (!gop.found_existing) gop.value_ptr.* = .{}; - - try gop.value_ptr.append(arena, @enumFromInt(i)); + for (ip.locals, 0..) 
|*local, tid| { + const items = local.shared.items.view(); + for ( + items.items(.tag)[0..local.mutate.items.len], + items.items(.data)[0..local.mutate.items.len], + 0.., + ) |tag, data, index| { + if (tag != .func_instance) continue; + const info = ip.extraData(Tag.FuncInstance, data); + + const gop = try instances.getOrPut(arena, info.generic_owner); + if (!gop.found_existing) gop.value_ptr.* = .{}; + + try gop.value_ptr.append( + arena, + Index.Unwrapped.wrap(.{ .tid = @enumFromInt(tid), .index = @intCast(index) }, ip), + ); + } } const SortContext = struct { @@ -9163,7 +9299,7 @@ pub fn dumpGenericInstancesFallible(ip: *const InternPool, allocator: Allocator) const generic_fn_owner_decl = ip.declPtrConst(ip.funcDeclOwner(entry.key_ptr.*)); try w.print("{} ({}): \n", .{ generic_fn_owner_decl.name.fmt(ip), entry.value_ptr.items.len }); for (entry.value_ptr.items) |index| { - const func = ip.extraFuncInstance(datas[@intFromEnum(index)]); + const func = ip.extraFuncInstance(index.getData(ip)); const owner_decl = ip.declPtrConst(func.owner_decl); try w.print(" {}: (", .{owner_decl.name.fmt(ip)}); for (func.comptime_args.get(ip)) |arg| { @@ -9518,7 +9654,7 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index { // This optimization on tags is needed so that indexToKey can call // typeOf without being recursive. - _ => switch (ip.items.items(.tag)[@intFromEnum(index)]) { + _ => switch (index.getTag(ip)) { .removed => unreachable, .type_int_signed, @@ -9551,7 +9687,7 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index { .undef, .opt_null, .only_possible_value, - => @enumFromInt(ip.items.items(.data)[@intFromEnum(index)]), + => @enumFromInt(index.getData(ip)), .simple_value => unreachable, // handled via Index above @@ -9584,7 +9720,7 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index { .aggregate, .repeated, => |t| { - const extra_index = ip.items.items(.data)[@intFromEnum(index)]; + const extra_index = index.getData(ip); const field_index = std.meta.fieldIndex(t.Payload(), "ty").?; return @enumFromInt(ip.extra.items[extra_index + field_index]); }, @@ -9602,7 +9738,7 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index { // Note these are stored in limbs data, not extra data. .int_positive, .int_negative, - => ip.limbData(Int, ip.items.items(.data)[@intFromEnum(index)]).ty, + => ip.limbData(Int, index.getData(ip)).ty, .enum_literal => .enum_literal_type, .float_f16 => .f16_type, @@ -9651,11 +9787,11 @@ pub fn aggregateTypeLenIncludingSentinel(ip: *const InternPool, ty: Index) u64 { } pub fn funcTypeReturnType(ip: *const InternPool, ty: Index) Index { - const item = ip.items.get(@intFromEnum(ty)); + const item = ty.getItem(ip); const child_item = switch (item.tag) { - .type_pointer => ip.items.get(ip.extra.items[ + .type_pointer => @as(Index, @enumFromInt(ip.extra.items[ item.data + std.meta.fieldIndex(Tag.TypePointer, "child").? - ]), + ])).getItem(ip), .type_function => item, else => unreachable, }; @@ -9668,47 +9804,47 @@ pub fn funcTypeReturnType(ip: *const InternPool, ty: Index) Index { pub fn isNoReturn(ip: *const InternPool, ty: Index) bool { return switch (ty) { .noreturn_type => true, - else => switch (ip.items.items(.tag)[@intFromEnum(ty)]) { - .type_error_set => ip.extra.items[ip.items.items(.data)[@intFromEnum(ty)] + std.meta.fieldIndex(Tag.ErrorSet, "names_len").?] == 0, + else => switch (ty.getTag(ip)) { + .type_error_set => ip.extra.items[ty.getData(ip) + std.meta.fieldIndex(Tag.ErrorSet, "names_len").?] 
== 0, else => false, }, }; } pub fn isUndef(ip: *const InternPool, val: Index) bool { - return val == .undef or ip.items.items(.tag)[@intFromEnum(val)] == .undef; + return val == .undef or val.getTag(ip) == .undef; } pub fn isVariable(ip: *const InternPool, val: Index) bool { - return ip.items.items(.tag)[@intFromEnum(val)] == .variable; + return val.getTag(ip) == .variable; } pub fn getBackingDecl(ip: *const InternPool, val: Index) OptionalDeclIndex { - var base = @intFromEnum(val); + var base = val; while (true) { - switch (ip.items.items(.tag)[base]) { + switch (base.getTag(ip)) { .ptr_decl => return @enumFromInt(ip.extra.items[ - ip.items.items(.data)[base] + std.meta.fieldIndex(PtrDecl, "decl").? + base.getData(ip) + std.meta.fieldIndex(PtrDecl, "decl").? ]), inline .ptr_eu_payload, .ptr_opt_payload, .ptr_elem, .ptr_field, - => |tag| base = ip.extra.items[ - ip.items.items(.data)[base] + std.meta.fieldIndex(tag.Payload(), "base").? - ], - .ptr_slice => base = ip.extra.items[ - ip.items.items(.data)[base] + std.meta.fieldIndex(PtrSlice, "ptr").? - ], + => |tag| base = @enumFromInt(ip.extra.items[ + base.getData(ip) + std.meta.fieldIndex(tag.Payload(), "base").? + ]), + .ptr_slice => base = @enumFromInt(ip.extra.items[ + base.getData(ip) + std.meta.fieldIndex(PtrSlice, "ptr").? + ]), else => return .none, } } } pub fn getBackingAddrTag(ip: *const InternPool, val: Index) ?Key.Ptr.BaseAddr.Tag { - var base = @intFromEnum(val); + var base = val; while (true) { - switch (ip.items.items(.tag)[base]) { + switch (base.getTag(ip)) { .ptr_decl => return .decl, .ptr_comptime_alloc => return .comptime_alloc, .ptr_anon_decl, @@ -9720,12 +9856,12 @@ pub fn getBackingAddrTag(ip: *const InternPool, val: Index) ?Key.Ptr.BaseAddr.Ta .ptr_opt_payload, .ptr_elem, .ptr_field, - => |tag| base = ip.extra.items[ - ip.items.items(.data)[base] + std.meta.fieldIndex(tag.Payload(), "base").? - ], - inline .ptr_slice => |tag| base = ip.extra.items[ - ip.items.items(.data)[base] + std.meta.fieldIndex(tag.Payload(), "ptr").? - ], + => |tag| base = @enumFromInt(ip.extra.items[ + base.getData(ip) + std.meta.fieldIndex(tag.Payload(), "base").? + ]), + inline .ptr_slice => |tag| base = @enumFromInt(ip.extra.items[ + base.getData(ip) + std.meta.fieldIndex(tag.Payload(), "ptr").? 
+ ]), else => return null, } } @@ -9834,7 +9970,7 @@ pub fn zigTypeTagOrPoison(ip: *const InternPool, index: Index) error{GenericPois .empty_struct => unreachable, .generic_poison => unreachable, - _ => switch (ip.items.items(.tag)[@intFromEnum(index)]) { + _ => switch (index.getTag(ip)) { .removed => unreachable, .type_int_signed, @@ -9941,24 +10077,22 @@ pub fn zigTypeTagOrPoison(ip: *const InternPool, index: Index) error{GenericPois }; } -pub fn isFuncBody(ip: *const InternPool, i: Index) bool { - assert(i != .none); - return switch (ip.items.items(.tag)[@intFromEnum(i)]) { +pub fn isFuncBody(ip: *const InternPool, index: Index) bool { + return switch (index.getTag(ip)) { .func_decl, .func_instance, .func_coerced => true, else => false, }; } -pub fn funcAnalysis(ip: *const InternPool, i: Index) *FuncAnalysis { - assert(i != .none); - const item = ip.items.get(@intFromEnum(i)); +pub fn funcAnalysis(ip: *const InternPool, index: Index) *FuncAnalysis { + const item = index.getItem(ip); const extra_index = switch (item.tag) { .func_decl => item.data + std.meta.fieldIndex(Tag.FuncDecl, "analysis").?, .func_instance => item.data + std.meta.fieldIndex(Tag.FuncInstance, "analysis").?, .func_coerced => i: { const extra_index = item.data + std.meta.fieldIndex(Tag.FuncCoerced, "func").?; const func_index: Index = @enumFromInt(ip.extra.items[extra_index]); - const sub_item = ip.items.get(@intFromEnum(func_index)); + const sub_item = func_index.getItem(ip); break :i switch (sub_item.tag) { .func_decl => sub_item.data + std.meta.fieldIndex(Tag.FuncDecl, "analysis").?, .func_instance => sub_item.data + std.meta.fieldIndex(Tag.FuncInstance, "analysis").?, @@ -9974,22 +10108,21 @@ pub fn funcHasInferredErrorSet(ip: *const InternPool, i: Index) bool { return funcAnalysis(ip, i).inferred_error_set; } -pub fn funcZirBodyInst(ip: *const InternPool, i: Index) TrackedInst.Index { - assert(i != .none); - const item = ip.items.get(@intFromEnum(i)); +pub fn funcZirBodyInst(ip: *const InternPool, index: Index) TrackedInst.Index { + const item = index.getItem(ip); const zir_body_inst_field_index = std.meta.fieldIndex(Tag.FuncDecl, "zir_body_inst").?; const extra_index = switch (item.tag) { .func_decl => item.data + zir_body_inst_field_index, - .func_instance => b: { + .func_instance => ei: { const generic_owner_field_index = std.meta.fieldIndex(Tag.FuncInstance, "generic_owner").?; - const func_decl_index = ip.extra.items[item.data + generic_owner_field_index]; - assert(ip.items.items(.tag)[func_decl_index] == .func_decl); - break :b ip.items.items(.data)[func_decl_index] + zir_body_inst_field_index; + const func_decl_index: Index = @enumFromInt(ip.extra.items[item.data + generic_owner_field_index]); + const func_decl_item = func_decl_index.getItem(ip); + assert(func_decl_item.tag == .func_decl); + break :ei func_decl_item.data + zir_body_inst_field_index; }, .func_coerced => { - const datas = ip.items.items(.data); const uncoerced_func_index: Index = @enumFromInt(ip.extra.items[ - datas[@intFromEnum(i)] + std.meta.fieldIndex(Tag.FuncCoerced, "func").? + item.data + std.meta.fieldIndex(Tag.FuncCoerced, "func").? 
]); return ip.funcZirBodyInst(uncoerced_func_index); }, @@ -9999,15 +10132,14 @@ pub fn funcZirBodyInst(ip: *const InternPool, i: Index) TrackedInst.Index { } pub fn iesFuncIndex(ip: *const InternPool, ies_index: Index) Index { - assert(ies_index != .none); - const tags = ip.items.items(.tag); - assert(tags[@intFromEnum(ies_index)] == .type_inferred_error_set); - const func_index = ip.items.items(.data)[@intFromEnum(ies_index)]; - switch (tags[func_index]) { + const item = ies_index.getItem(ip); + assert(item.tag == .type_inferred_error_set); + const func_index: Index = @enumFromInt(item.data); + switch (func_index.getTag(ip)) { .func_decl, .func_instance => {}, else => unreachable, // assertion failed } - return @enumFromInt(func_index); + return func_index; } /// Returns a mutable pointer to the resolved error set type of an inferred @@ -10026,21 +10158,19 @@ pub fn iesResolved(ip: *const InternPool, ies_index: Index) *Index { /// error set function. The returned pointer is invalidated when anything is /// added to `ip`. pub fn funcIesResolved(ip: *const InternPool, func_index: Index) *Index { - const tags = ip.items.items(.tag); - const datas = ip.items.items(.data); assert(funcHasInferredErrorSet(ip, func_index)); - const func_start = datas[@intFromEnum(func_index)]; - const extra_index = switch (tags[@intFromEnum(func_index)]) { - .func_decl => func_start + @typeInfo(Tag.FuncDecl).Struct.fields.len, - .func_instance => func_start + @typeInfo(Tag.FuncInstance).Struct.fields.len, + const func_item = func_index.getItem(ip); + const extra_index = switch (func_item.tag) { + .func_decl => func_item.data + @typeInfo(Tag.FuncDecl).Struct.fields.len, + .func_instance => func_item.data + @typeInfo(Tag.FuncInstance).Struct.fields.len, .func_coerced => i: { const uncoerced_func_index: Index = @enumFromInt(ip.extra.items[ - func_start + std.meta.fieldIndex(Tag.FuncCoerced, "func").? + func_item.data + std.meta.fieldIndex(Tag.FuncCoerced, "func").? 
]); - const uncoerced_func_start = datas[@intFromEnum(uncoerced_func_index)]; - break :i switch (tags[@intFromEnum(uncoerced_func_index)]) { - .func_decl => uncoerced_func_start + @typeInfo(Tag.FuncDecl).Struct.fields.len, - .func_instance => uncoerced_func_start + @typeInfo(Tag.FuncInstance).Struct.fields.len, + const uncoerced_func_item = uncoerced_func_index.getItem(ip); + break :i switch (uncoerced_func_item.tag) { + .func_decl => uncoerced_func_item.data + @typeInfo(Tag.FuncDecl).Struct.fields.len, + .func_instance => uncoerced_func_item.data + @typeInfo(Tag.FuncInstance).Struct.fields.len, else => unreachable, }; }, @@ -10049,35 +10179,28 @@ pub fn funcIesResolved(ip: *const InternPool, func_index: Index) *Index { return @ptrCast(&ip.extra.items[extra_index]); } -pub fn funcDeclInfo(ip: *const InternPool, i: Index) Key.Func { - const tags = ip.items.items(.tag); - const datas = ip.items.items(.data); - assert(tags[@intFromEnum(i)] == .func_decl); - return extraFuncDecl(ip, datas[@intFromEnum(i)]); +pub fn funcDeclInfo(ip: *const InternPool, index: Index) Key.Func { + const item = index.getItem(ip); + assert(item.tag == .func_decl); + return extraFuncDecl(ip, item.data); } -pub fn funcDeclOwner(ip: *const InternPool, i: Index) DeclIndex { - return funcDeclInfo(ip, i).owner_decl; +pub fn funcDeclOwner(ip: *const InternPool, index: Index) DeclIndex { + return funcDeclInfo(ip, index).owner_decl; } -pub fn funcTypeParamsLen(ip: *const InternPool, i: Index) u32 { - const tags = ip.items.items(.tag); - const datas = ip.items.items(.data); - assert(tags[@intFromEnum(i)] == .type_function); - const start = datas[@intFromEnum(i)]; - return ip.extra.items[start + std.meta.fieldIndex(Tag.TypeFunction, "params_len").?]; +pub fn funcTypeParamsLen(ip: *const InternPool, index: Index) u32 { + const item = index.getItem(ip); + assert(item.tag == .type_function); + return ip.extra.items[item.data + std.meta.fieldIndex(Tag.TypeFunction, "params_len").?]; } -pub fn unwrapCoercedFunc(ip: *const InternPool, i: Index) Index { - const tags = ip.items.items(.tag); - return switch (tags[@intFromEnum(i)]) { - .func_coerced => { - const datas = ip.items.items(.data); - return @enumFromInt(ip.extra.items[ - datas[@intFromEnum(i)] + std.meta.fieldIndex(Tag.FuncCoerced, "func").? - ]); - }, - .func_instance, .func_decl => i, +pub fn unwrapCoercedFunc(ip: *const InternPool, index: Index) Index { + return switch (index.getTag(ip)) { + .func_coerced => @enumFromInt(ip.extra.items[ + index.getData(ip) + std.meta.fieldIndex(Tag.FuncCoerced, "func").? + ]), + .func_instance, .func_decl => index, else => unreachable, }; } @@ -10085,7 +10208,12 @@ pub fn unwrapCoercedFunc(ip: *const InternPool, i: Index) Index { /// Having resolved a builtin type to a real struct/union/enum (which is now at `resolverd_index`), /// make `want_index` refer to this type instead. This invalidates `resolved_index`, so must be /// called only when it is guaranteed that no reference to `resolved_index` exists. 
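
The new `resolveBuiltinType` below no longer scribbles an `unreachable` marker over the vacated slot under runtime safety; it copies the resolved item into the builtin slot (main thread only, hence the panic) and retires the temporary index through `remove`, reusing the tombstone semantics shown earlier. Its only call site touched in this series is `Sema.wrapWipTy`, updated a little further down:

    // From the Sema.zig hunk below; pt is the Zcu.PerThread handle.
    pt.zcu.intern_pool.resolveBuiltinType(pt.tid, new.index, wip_ty.index);
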
-pub fn resolveBuiltinType(ip: *InternPool, want_index: Index, resolved_index: Index) void { +pub fn resolveBuiltinType( + ip: *InternPool, + tid: Zcu.PerThread.Id, + want_index: Index, + resolved_index: Index, +) void { assert(@intFromEnum(want_index) >= @intFromEnum(Index.first_type)); assert(@intFromEnum(want_index) <= @intFromEnum(Index.last_type)); @@ -10097,20 +10225,12 @@ pub fn resolveBuiltinType(ip: *InternPool, want_index: Index, resolved_index: In (ip.zigTypeTagOrPoison(resolved_index) catch unreachable)); // Copy the data - const item = ip.items.get(@intFromEnum(resolved_index)); - ip.items.set(@intFromEnum(want_index), item); - - if (std.debug.runtime_safety) { - // Make the value unreachable - this is a weird value which will make (incorrect) existing - // references easier to spot - ip.items.set(@intFromEnum(resolved_index), .{ - .tag = .simple_value, - .data = @intFromEnum(SimpleValue.@"unreachable"), - }); - } else { - // Here we could add the index to a free-list for reuse, but since - // there is so little garbage created this way it's not worth it. - } + const resolved_item = resolved_index.getItem(ip); + const want_unwrapped = want_index.unwrap(ip); + if (tid != .main or want_unwrapped.tid != .main) @panic("This operation is impossible to be thread-safe"); + var want_items = ip.getLocalShared(want_unwrapped.tid).items.view(); + want_items.set(want_unwrapped.index, resolved_item); + ip.remove(tid, resolved_index); } pub fn anonStructFieldTypes(ip: *const InternPool, i: Index) []const Index { diff --git a/src/Sema.zig b/src/Sema.zig index ee4ac3b703..34db457955 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -2717,10 +2717,11 @@ fn getCaptures(sema: *Sema, block: *Block, type_src: LazySrcLoc, extra_index: us /// Given an `InternPool.WipNamespaceType` or `InternPool.WipEnumType`, apply /// `sema.builtin_type_target_index` to it if necessary. fn wrapWipTy(sema: *Sema, wip_ty: anytype) @TypeOf(wip_ty) { + const pt = sema.pt; if (sema.builtin_type_target_index == .none) return wip_ty; var new = wip_ty; new.index = sema.builtin_type_target_index; - sema.pt.zcu.intern_pool.resolveBuiltinType(new.index, wip_ty.index); + pt.zcu.intern_pool.resolveBuiltinType(pt.tid, new.index, wip_ty.index); return new; } @@ -2740,7 +2741,7 @@ fn maybeRemoveOutdatedType(sema: *Sema, ty: InternPool.Index) !bool { if (!was_outdated) return false; _ = zcu.outdated_ready.swapRemove(decl_as_depender); zcu.intern_pool.removeDependenciesForDepender(zcu.gpa, AnalUnit.wrap(.{ .decl = decl_index })); - zcu.intern_pool.remove(ty); + zcu.intern_pool.remove(pt.tid, ty); zcu.declPtr(decl_index).analysis = .dependency_failure; try zcu.markDependeeOutdated(.{ .decl_val = decl_index }); return true; @@ -2819,7 +2820,7 @@ fn zirStructDecl( }, .wip => |wip| wip, }); - errdefer wip_ty.cancel(ip); + errdefer wip_ty.cancel(ip, pt.tid); const new_decl_index = try sema.createAnonymousDeclTypeNamed( block, @@ -3056,7 +3057,7 @@ fn zirEnumDecl( // have finished constructing the type and are in the process of analyzing it. 
var done = false; - errdefer if (!done) wip_ty.cancel(ip); + errdefer if (!done) wip_ty.cancel(ip, pt.tid); const new_decl_index = try sema.createAnonymousDeclTypeNamed( block, @@ -3324,7 +3325,7 @@ fn zirUnionDecl( }, .wip => |wip| wip, }); - errdefer wip_ty.cancel(ip); + errdefer wip_ty.cancel(ip, pt.tid); const new_decl_index = try sema.createAnonymousDeclTypeNamed( block, @@ -3414,7 +3415,7 @@ fn zirOpaqueDecl( }, .wip => |wip| wip, }; - errdefer wip_ty.cancel(ip); + errdefer wip_ty.cancel(ip, pt.tid); const new_decl_index = try sema.createAnonymousDeclTypeNamed( block, @@ -21705,7 +21706,7 @@ fn zirReify( .existing => |ty| return Air.internedToRef(ty), .wip => |wip| wip, }; - errdefer wip_ty.cancel(ip); + errdefer wip_ty.cancel(ip, pt.tid); const new_decl_index = try sema.createAnonymousDeclTypeNamed( block, @@ -21901,7 +21902,7 @@ fn reifyEnum( .wip => |wip| wip, .existing => |ty| return Air.internedToRef(ty), }; - errdefer wip_ty.cancel(ip); + errdefer wip_ty.cancel(ip, pt.tid); if (tag_ty.zigTypeTag(mod) != .Int) { return sema.fail(block, src, "Type.Enum.tag_type must be an integer type", .{}); @@ -22052,7 +22053,7 @@ fn reifyUnion( .wip => |wip| wip, .existing => |ty| return Air.internedToRef(ty), }; - errdefer wip_ty.cancel(ip); + errdefer wip_ty.cancel(ip, pt.tid); const new_decl_index = try sema.createAnonymousDeclTypeNamed( block, @@ -22158,7 +22159,7 @@ fn reifyUnion( const enum_tag_ty = try sema.generateUnionTagTypeSimple(block, field_names.keys(), mod.declPtr(new_decl_index)); break :tag_ty .{ enum_tag_ty, false }; }; - errdefer if (!has_explicit_tag) ip.remove(enum_tag_ty); // remove generated tag type on error + errdefer if (!has_explicit_tag) ip.remove(pt.tid, enum_tag_ty); // remove generated tag type on error for (field_types) |field_ty_ip| { const field_ty = Type.fromInterned(field_ty_ip); @@ -22305,7 +22306,7 @@ fn reifyStruct( .wip => |wip| wip, .existing => |ty| return Air.internedToRef(ty), }; - errdefer wip_ty.cancel(ip); + errdefer wip_ty.cancel(ip, pt.tid); if (is_tuple) switch (layout) { .@"extern" => return sema.fail(block, src, "extern tuples are not supported", .{}), @@ -36924,7 +36925,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .none, => unreachable, - _ => switch (ip.items.items(.tag)[@intFromEnum(ty.toIntern())]) { + _ => switch (ty.toIntern().getTag(ip)) { .removed => unreachable, .type_int_signed, // i0 handled above diff --git a/src/Type.zig b/src/Type.zig index 0bb8f3f144..ba53535d40 100644 --- a/src/Type.zig +++ b/src/Type.zig @@ -3686,7 +3686,7 @@ pub fn resolveFields(ty: Type, pt: Zcu.PerThread) SemaError!void { .empty_struct => unreachable, .generic_poison => unreachable, - else => switch (ip.items.items(.tag)[@intFromEnum(ty_ip)]) { + else => switch (ty_ip.getTag(ip)) { .type_struct, .type_struct_packed, .type_struct_packed_inits, diff --git a/src/Zcu.zig b/src/Zcu.zig index 04ba7cc328..b855e4fcf0 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -3071,42 +3071,6 @@ pub const SemaDeclResult = packed struct { invalidate_decl_ref: bool, }; -pub fn semaAnonOwnerDecl(zcu: *Zcu, decl_index: Decl.Index) !SemaDeclResult { - const decl = zcu.declPtr(decl_index); - - assert(decl.has_tv); - assert(decl.owns_tv); - - log.debug("semaAnonOwnerDecl '{d}'", .{@intFromEnum(decl_index)}); - - switch (decl.typeOf(zcu).zigTypeTag(zcu)) { - .Fn => @panic("TODO: update fn instance"), - .Type => {}, - else => unreachable, - } - - // We are the owner Decl of a type, and we were marked as outdated. 
That means the *structure* - // of this type changed; not just its namespace. Therefore, we need a new InternPool index. - // - // However, as soon as we make that, the context that created us will require re-analysis anyway - // (as it depends on this Decl's value), meaning the `struct_decl` (or equivalent) instruction - // will be analyzed again. Since Sema already needs to be able to reconstruct types like this, - // why should we bother implementing it here too when the Sema logic will be hit right after? - // - // So instead, let's just mark this Decl as failed - so that any remaining Decls which genuinely - // reference it (via `@This`) end up silently erroring too - and we'll let Sema make a new type - // with a new Decl. - // - // Yes, this does mean that any type owner Decl has a constant value for its entire lifetime. - zcu.intern_pool.removeDependenciesForDepender(zcu.gpa, AnalUnit.wrap(.{ .decl = decl_index })); - zcu.intern_pool.remove(decl.val.toIntern()); - decl.analysis = .dependency_failure; - return .{ - .invalidate_decl_val = true, - .invalidate_decl_ref = true, - }; -} - pub const ImportFileResult = struct { file: *File, file_index: File.Index, diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index 2d2be29909..1233275a26 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -574,7 +574,7 @@ pub fn ensureFuncBodyAnalyzed(pt: Zcu.PerThread, maybe_coerced_func_index: Inter if (decl.val.ip_index != func_index) { try zcu.markDependeeOutdated(.{ .func_ies = func_index }); ip.removeDependenciesForDepender(gpa, InternPool.AnalUnit.wrap(.{ .func = func_index })); - ip.remove(func_index); + ip.remove(pt.tid, func_index); @panic("TODO: remove orphaned function from binary"); } @@ -823,7 +823,7 @@ fn getFileRootStruct( .existing => unreachable, // we wouldn't be analysing the file root if this type existed .wip => |wip| wip, }; - errdefer wip_ty.cancel(ip); + errdefer wip_ty.cancel(ip, pt.tid); if (zcu.comp.debug_incremental) { try ip.addDependency( @@ -885,7 +885,7 @@ fn semaFileUpdate(pt: Zcu.PerThread, file_index: Zcu.File.Index, type_outdated: ip.removeDependenciesForDepender(zcu.gpa, InternPool.AnalUnit.wrap(.{ .decl = file_root_decl, })); - ip.remove(decl.val.toIntern()); + ip.remove(pt.tid, decl.val.toIntern()); decl.val = undefined; _ = try pt.getFileRootStruct(file_root_decl, decl.src_namespace, file_index); return true; @@ -959,7 +959,7 @@ fn semaFile(pt: Zcu.PerThread, file_index: Zcu.File.Index) Zcu.SemaError!void { assert(file.zir_loaded); const struct_ty = try pt.getFileRootStruct(new_decl_index, new_namespace_index, file_index); - errdefer zcu.intern_pool.remove(struct_ty); + errdefer zcu.intern_pool.remove(pt.tid, struct_ty); switch (zcu.comp.cache_use) { .whole => |whole| if (whole.cache_manifest) |man| { @@ -1002,7 +1002,7 @@ fn semaDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) !Zcu.SemaDeclResult { if (decl.zir_decl_index == .none and decl.owns_tv) { // We are re-analyzing an anonymous owner Decl (for a function or a namespace type). 
- return zcu.semaAnonOwnerDecl(decl_index); + return pt.semaAnonOwnerDecl(decl_index); } log.debug("semaDecl '{d}'", .{@intFromEnum(decl_index)}); @@ -1270,6 +1270,43 @@ fn semaDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) !Zcu.SemaDeclResult { return result; } +pub fn semaAnonOwnerDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) !Zcu.SemaDeclResult { + const zcu = pt.zcu; + const decl = zcu.declPtr(decl_index); + + assert(decl.has_tv); + assert(decl.owns_tv); + + log.debug("semaAnonOwnerDecl '{d}'", .{@intFromEnum(decl_index)}); + + switch (decl.typeOf(zcu).zigTypeTag(zcu)) { + .Fn => @panic("TODO: update fn instance"), + .Type => {}, + else => unreachable, + } + + // We are the owner Decl of a type, and we were marked as outdated. That means the *structure* + // of this type changed; not just its namespace. Therefore, we need a new InternPool index. + // + // However, as soon as we make that, the context that created us will require re-analysis anyway + // (as it depends on this Decl's value), meaning the `struct_decl` (or equivalent) instruction + // will be analyzed again. Since Sema already needs to be able to reconstruct types like this, + // why should we bother implementing it here too when the Sema logic will be hit right after? + // + // So instead, let's just mark this Decl as failed - so that any remaining Decls which genuinely + // reference it (via `@This`) end up silently erroring too - and we'll let Sema make a new type + // with a new Decl. + // + // Yes, this does mean that any type owner Decl has a constant value for its entire lifetime. + zcu.intern_pool.removeDependenciesForDepender(zcu.gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index })); + zcu.intern_pool.remove(pt.tid, decl.val.toIntern()); + decl.analysis = .dependency_failure; + return .{ + .invalidate_decl_val = true, + .invalidate_decl_ref = true, + }; +} + pub fn embedFile( pt: Zcu.PerThread, cur_file: *Zcu.File, -- cgit v1.2.3 From bdae01ab047accbbc6dcd014d008f2554aa78696 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sun, 7 Jul 2024 07:33:09 -0400 Subject: InternPool: implement and use thread-safe list for extra and limbs --- lib/std/Thread/Pool.zig | 21 +- src/Compilation.zig | 12 +- src/InternPool.zig | 1930 ++++++++++++++++++++++++++--------------------- src/Sema.zig | 2 +- src/Type.zig | 2 +- src/Value.zig | 5 +- src/Zcu/PerThread.zig | 2 +- src/main.zig | 44 +- 8 files changed, 1133 insertions(+), 885 deletions(-) (limited to 'src/Compilation.zig') diff --git a/lib/std/Thread/Pool.zig b/lib/std/Thread/Pool.zig index 5972c4111a..d501b66520 100644 --- a/lib/std/Thread/Pool.zig +++ b/lib/std/Thread/Pool.zig @@ -21,7 +21,7 @@ const Runnable = struct { runFn: RunProto, }; -const RunProto = *const fn (*Runnable, id: ?usize) void; +const RunProto = *const fn (*Runnable, id: ?u32) void; pub const Options = struct { allocator: std.mem.Allocator, @@ -109,7 +109,7 @@ pub fn spawnWg(pool: *Pool, wait_group: *WaitGroup, comptime func: anytype, args run_node: RunQueue.Node = .{ .data = .{ .runFn = runFn } }, wait_group: *WaitGroup, - fn runFn(runnable: *Runnable, _: ?usize) void { + fn runFn(runnable: *Runnable, _: ?u32) void { const run_node: *RunQueue.Node = @fieldParentPtr("data", runnable); const closure: *@This() = @alignCast(@fieldParentPtr("run_node", run_node)); @call(.auto, func, closure.arguments); @@ -150,7 +150,7 @@ pub fn spawnWg(pool: *Pool, wait_group: *WaitGroup, comptime func: anytype, args /// Runs `func` in the thread pool, calling `WaitGroup.start` beforehand, and /// 
`WaitGroup.finish` after it returns. /// -/// The first argument passed to `func` is a dense `usize` thread id, the rest +/// The first argument passed to `func` is a dense `u32` thread id, the rest /// of the arguments are passed from `args`. Requires the pool to have been /// initialized with `.track_ids = true`. /// @@ -172,7 +172,7 @@ pub fn spawnWgId(pool: *Pool, wait_group: *WaitGroup, comptime func: anytype, ar run_node: RunQueue.Node = .{ .data = .{ .runFn = runFn } }, wait_group: *WaitGroup, - fn runFn(runnable: *Runnable, id: ?usize) void { + fn runFn(runnable: *Runnable, id: ?u32) void { const run_node: *RunQueue.Node = @fieldParentPtr("data", runnable); const closure: *@This() = @alignCast(@fieldParentPtr("run_node", run_node)); @call(.auto, func, .{id.?} ++ closure.arguments); @@ -258,7 +258,7 @@ fn worker(pool: *Pool) void { pool.mutex.lock(); defer pool.mutex.unlock(); - const id = if (pool.ids.count() > 0) pool.ids.count() else null; + const id: ?u32 = if (pool.ids.count() > 0) @intCast(pool.ids.count()) else null; if (id) |_| pool.ids.putAssumeCapacityNoClobber(std.Thread.getCurrentId(), {}); while (true) { @@ -280,12 +280,15 @@ fn worker(pool: *Pool) void { } pub fn waitAndWork(pool: *Pool, wait_group: *WaitGroup) void { - var id: ?usize = null; + var id: ?u32 = null; while (!wait_group.isDone()) { pool.mutex.lock(); if (pool.run_queue.popFirst()) |run_node| { - id = id orelse pool.ids.getIndex(std.Thread.getCurrentId()); + id = id orelse if (pool.ids.getIndex(std.Thread.getCurrentId())) |index| + @intCast(index) + else + null; pool.mutex.unlock(); run_node.data.runFn(&run_node.data, id); continue; @@ -297,6 +300,6 @@ pub fn waitAndWork(pool: *Pool, wait_group: *WaitGroup) void { } } -pub fn getIdCount(pool: *Pool) usize { - return 1 + pool.threads.len; +pub fn getIdCount(pool: *Pool) u32 { + return @intCast(1 + pool.threads.len); } diff --git a/src/Compilation.zig b/src/Compilation.zig index a54205dddf..74e8222bc3 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -2746,8 +2746,8 @@ pub fn makeBinFileWritable(comp: *Compilation) !void { const Header = extern struct { intern_pool: extern struct { //items_len: u32, - extra_len: u32, - limbs_len: u32, + //extra_len: u32, + //limbs_len: u32, //string_bytes_len: u32, tracked_insts_len: u32, src_hash_deps_len: u32, @@ -2775,8 +2775,8 @@ pub fn saveState(comp: *Compilation) !void { const header: Header = .{ .intern_pool = .{ //.items_len = @intCast(ip.items.len), - .extra_len = @intCast(ip.extra.items.len), - .limbs_len = @intCast(ip.limbs.items.len), + //.extra_len = @intCast(ip.extra.items.len), + //.limbs_len = @intCast(ip.limbs.items.len), //.string_bytes_len = @intCast(ip.string_bytes.items.len), .tracked_insts_len = @intCast(ip.tracked_insts.count()), .src_hash_deps_len = @intCast(ip.src_hash_deps.count()), @@ -2790,8 +2790,8 @@ pub fn saveState(comp: *Compilation) !void { }, }; addBuf(&bufs_list, &bufs_len, mem.asBytes(&header)); - addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.limbs.items)); - addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.extra.items)); + //addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.limbs.items)); + //addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.extra.items)); //addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.items.items(.data))); //addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.items.items(.tag))); //addBuf(&bufs_list, &bufs_len, ip.string_bytes.items); diff --git a/src/InternPool.zig b/src/InternPool.zig index 8002b8d2f3..9f179b601e 100644 --- a/src/InternPool.zig 
+++ b/src/InternPool.zig @@ -8,13 +8,6 @@ tid_width: if (single_threaded) u0 else std.math.Log2Int(u32) = 0, tid_shift_31: if (single_threaded) u0 else std.math.Log2Int(u32) = if (single_threaded) 0 else 31, tid_shift_32: if (single_threaded) u0 else std.math.Log2Int(u32) = if (single_threaded) 0 else 31, -extra: std.ArrayListUnmanaged(u32) = .{}, -/// On 32-bit systems, this array is ignored and extra is used for everything. -/// On 64-bit systems, this array is used for big integers and associated metadata. -/// Use the helper methods instead of accessing this directly in order to not -/// violate the above mechanism. -limbs: std.ArrayListUnmanaged(u64) = .{}, - /// Rather than allocating Decl objects with an Allocator, we instead allocate /// them with this SegmentedList. This provides four advantages: /// * Stable memory so that one thread can access a Decl object while another @@ -352,14 +345,32 @@ const Local = struct { mutate: struct { arena: std.heap.ArenaAllocator.State, items: Mutate, + extra: Mutate, + limbs: Mutate, strings: Mutate, } align(std.atomic.cache_line), const Shared = struct { items: List(Item), + extra: Extra, + limbs: Limbs, strings: Strings, + + pub fn getLimbs(shared: *const Local.Shared) Limbs { + return switch (@sizeOf(Limb)) { + @sizeOf(u32) => shared.extra, + @sizeOf(u64) => shared.limbs, + else => @compileError("unsupported host"), + }.acquire(); + } }; + const Extra = List(struct { u32 }); + const Limbs = switch (@sizeOf(Limb)) { + @sizeOf(u32) => Extra, + @sizeOf(u64) => List(struct { u64 }), + else => @compileError("unsupported host"), + }; const Strings = List(struct { u8 }); const Mutate = struct { @@ -384,7 +395,25 @@ const Local = struct { const fields = std.enums.values(std.meta.FieldEnum(Elem)); - fn Slice(comptime opts: struct { is_const: bool = false }) type { + fn PtrArrayElem(comptime len: usize) type { + const elem_info = @typeInfo(Elem).Struct; + const elem_fields = elem_info.fields; + var new_fields: [elem_fields.len]std.builtin.Type.StructField = undefined; + for (&new_fields, elem_fields) |*new_field, elem_field| new_field.* = .{ + .name = elem_field.name, + .type = *[len]elem_field.type, + .default_value = null, + .is_comptime = false, + .alignment = 0, + }; + return @Type(.{ .Struct = .{ + .layout = .auto, + .fields = &new_fields, + .decls = &.{}, + .is_tuple = elem_info.is_tuple, + } }); + } + fn SliceElem(comptime opts: struct { is_const: bool = false }) type { const elem_info = @typeInfo(Elem).Struct; const elem_fields = elem_info.fields; var new_fields: [elem_fields.len]std.builtin.Type.StructField = undefined; @@ -419,20 +448,19 @@ const Local = struct { pub fn appendAssumeCapacity(mutable: Mutable, elem: Elem) void { var mutable_view = mutable.view(); - defer mutable.lenPtr().* = @intCast(mutable_view.len); + defer mutable.mutate.len = @intCast(mutable_view.len); mutable_view.appendAssumeCapacity(elem); } pub fn appendSliceAssumeCapacity( mutable: Mutable, - slice: Slice(.{ .is_const = true }), + slice: SliceElem(.{ .is_const = true }), ) void { if (fields.len == 0) return; - const mutable_len = mutable.lenPtr(); - const start = mutable_len.*; + const start = mutable.mutate.len; const slice_len = @field(slice, @tagName(fields[0])).len; - assert(slice_len <= mutable.capacityPtr().* - start); - mutable_len.* = @intCast(start + slice_len); + assert(slice_len <= mutable.list.header().capacity - start); + mutable.mutate.len = @intCast(start + slice_len); const mutable_view = mutable.view(); inline for (fields) |field| { const field_slice 
= @field(slice, @tagName(field)); @@ -447,28 +475,43 @@ const Local = struct { } pub fn appendNTimesAssumeCapacity(mutable: Mutable, elem: Elem, len: usize) void { - const mutable_len = mutable.lenPtr(); - const start = mutable_len.*; - assert(len <= mutable.capacityPtr().* - start); - mutable_len.* = @intCast(start + len); + const start = mutable.mutate.len; + assert(len <= mutable.list.header().capacity - start); + mutable.mutate.len = @intCast(start + len); const mutable_view = mutable.view(); inline for (fields) |field| { @memset(mutable_view.items(field)[start..][0..len], @field(elem, @tagName(field))); } } - pub fn addManyAsSlice(mutable: Mutable, len: usize) Allocator.Error!Slice(.{}) { + pub fn addManyAsArray(mutable: Mutable, comptime len: usize) Allocator.Error!PtrArrayElem(len) { + try mutable.ensureUnusedCapacity(len); + return mutable.addManyAsArrayAssumeCapacity(len); + } + + pub fn addManyAsArrayAssumeCapacity(mutable: Mutable, comptime len: usize) PtrArrayElem(len) { + const start = mutable.mutate.len; + assert(len <= mutable.list.header().capacity - start); + mutable.mutate.len = @intCast(start + len); + const mutable_view = mutable.view(); + var ptr_array: PtrArrayElem(len) = undefined; + inline for (fields) |field| { + @field(ptr_array, @tagName(field)) = mutable_view.items(field)[start..][0..len]; + } + return ptr_array; + } + + pub fn addManyAsSlice(mutable: Mutable, len: usize) Allocator.Error!SliceElem(.{}) { try mutable.ensureUnusedCapacity(len); return mutable.addManyAsSliceAssumeCapacity(len); } - pub fn addManyAsSliceAssumeCapacity(mutable: Mutable, len: usize) Slice(.{}) { - const mutable_len = mutable.lenPtr(); - const start = mutable_len.*; - assert(len <= mutable.capacityPtr().* - start); - mutable_len.* = @intCast(start + len); + pub fn addManyAsSliceAssumeCapacity(mutable: Mutable, len: usize) SliceElem(.{}) { + const start = mutable.mutate.len; + assert(len <= mutable.list.header().capacity - start); + mutable.mutate.len = @intCast(start + len); const mutable_view = mutable.view(); - var slice: Slice(.{}) = undefined; + var slice: SliceElem(.{}) = undefined; inline for (fields) |field| { @field(slice, @tagName(field)) = mutable_view.items(field)[start..][0..len]; } @@ -476,17 +519,16 @@ const Local = struct { } pub fn shrinkRetainingCapacity(mutable: Mutable, len: usize) void { - const mutable_len = mutable.lenPtr(); - assert(len <= mutable_len.*); - mutable_len.* = @intCast(len); + assert(len <= mutable.mutate.len); + mutable.mutate.len = @intCast(len); } pub fn ensureUnusedCapacity(mutable: Mutable, unused_capacity: usize) Allocator.Error!void { - try mutable.ensureTotalCapacity(@intCast(mutable.lenPtr().* + unused_capacity)); + try mutable.ensureTotalCapacity(@intCast(mutable.mutate.len + unused_capacity)); } pub fn ensureTotalCapacity(mutable: Mutable, total_capacity: usize) Allocator.Error!void { - const old_capacity = mutable.capacityPtr().*; + const old_capacity = mutable.list.header().capacity; if (old_capacity >= total_capacity) return; var new_capacity = old_capacity; while (new_capacity < total_capacity) new_capacity = (new_capacity + 10) * 2; @@ -503,7 +545,7 @@ const Local = struct { ); var new_list: ListSelf = .{ .bytes = @ptrCast(buf[bytes_offset..].ptr) }; new_list.header().* = .{ .capacity = capacity }; - const len = mutable.lenPtr().*; + const len = mutable.mutate.len; // this cold, quickly predictable, condition enables // the `MultiArrayList` optimization in `view` if (len > 0) { @@ -515,27 +557,19 @@ const Local = struct { } fn 
view(mutable: Mutable) View { - const capacity = mutable.capacityPtr().*; + const capacity = mutable.list.header().capacity; assert(capacity > 0); // optimizes `MultiArrayList.Slice.items` return .{ .bytes = mutable.list.bytes, - .len = mutable.lenPtr().*, + .len = mutable.mutate.len, .capacity = capacity, }; } - - pub fn lenPtr(mutable: Mutable) *u32 { - return &mutable.mutate.len; - } - - pub fn capacityPtr(mutable: Mutable) *u32 { - return &mutable.list.header().capacity; - } }; const empty: ListSelf = .{ .bytes = @constCast(&(extern struct { header: Header, - bytes: [0]u8, + bytes: [0]u8 align(@alignOf(Elem)), }{ .header = .{ .capacity = 0 }, .bytes = .{}, @@ -580,6 +614,32 @@ const Local = struct { }; } + pub fn getMutableExtra(local: *Local, gpa: std.mem.Allocator) Extra.Mutable { + return .{ + .gpa = gpa, + .arena = &local.mutate.arena, + .mutate = &local.mutate.extra, + .list = &local.shared.extra, + }; + } + + /// On 32-bit systems, this array is ignored and extra is used for everything. + /// On 64-bit systems, this array is used for big integers and associated metadata. + /// Use the helper methods instead of accessing this directly in order to not + /// violate the above mechanism. + pub fn getMutableLimbs(local: *Local, gpa: std.mem.Allocator) Limbs.Mutable { + return switch (@sizeOf(Limb)) { + @sizeOf(u32) => local.getMutableExtra(gpa), + @sizeOf(u64) => .{ + .gpa = gpa, + .arena = &local.mutate.arena, + .mutate = &local.mutate.limbs, + .list = &local.shared.limbs, + }, + else => @compileError("unsupported host"), + }; + } + /// In order to store references to strings in fewer bytes, we copy all /// string bytes into here. String bytes can be null. It is up to whomever /// is referencing the data here whether they want to store both index and length, @@ -817,8 +877,9 @@ pub const String = enum(u32) { } fn toOverlongSlice(string: String, ip: *const InternPool) []const u8 { - const unwrapped = string.unwrap(ip); - return ip.getLocalShared(unwrapped.tid).strings.acquire().view().items(.@"0")[unwrapped.index..]; + const unwrapped_string = string.unwrap(ip); + const strings = ip.getLocalShared(unwrapped_string.tid).strings.acquire(); + return strings.view().items(.@"0")[unwrapped_string.index..]; } }; @@ -848,11 +909,15 @@ pub const NullTerminatedString = enum(u32) { /// This type exists to provide a struct with lifetime that is /// not invalidated when items are added to the `InternPool`. 
pub const Slice = struct { + tid: Zcu.PerThread.Id, start: u32, len: u32, + pub const empty: Slice = .{ .tid = .main, .start = 0, .len = 0 }; + pub fn get(slice: Slice, ip: *const InternPool) []NullTerminatedString { - return @ptrCast(ip.extra.items[slice.start..][0..slice.len]); + const extra = ip.getLocalShared(slice.tid).extra.acquire(); + return @ptrCast(extra.view().items(.@"0")[slice.start..][0..slice.len]); } }; @@ -983,10 +1048,15 @@ pub const CaptureValue = packed struct(u32) { }; pub const Slice = struct { + tid: Zcu.PerThread.Id, start: u32, len: u32, + + pub const empty: Slice = .{ .tid = .main, .start = 0, .len = 0 }; + pub fn get(slice: Slice, ip: *const InternPool) []CaptureValue { - return @ptrCast(ip.extra.items[slice.start..][0..slice.len]); + const extra = ip.getLocalShared(slice.tid).extra.acquire(); + return @ptrCast(extra.view().items(.@"0")[slice.start..][0..slice.len]); } }; }; @@ -1272,6 +1342,7 @@ pub const Key = union(enum) { }; pub const Func = struct { + tid: Zcu.PerThread.Id, /// In the case of a generic function, this type will potentially have fewer parameters /// than the generic owner's type, because the comptime parameters will be deleted. ty: Index, @@ -1327,23 +1398,27 @@ pub const Key = union(enum) { /// Returns a pointer that becomes invalid after any additions to the `InternPool`. pub fn analysis(func: *const Func, ip: *const InternPool) *FuncAnalysis { - return @ptrCast(&ip.extra.items[func.analysis_extra_index]); + const extra = ip.getLocalShared(func.tid).extra.acquire(); + return @ptrCast(&extra.view().items(.@"0")[func.analysis_extra_index]); } /// Returns a pointer that becomes invalid after any additions to the `InternPool`. pub fn zirBodyInst(func: *const Func, ip: *const InternPool) *TrackedInst.Index { - return @ptrCast(&ip.extra.items[func.zir_body_inst_extra_index]); + const extra = ip.getLocalShared(func.tid).extra.acquire(); + return @ptrCast(&extra.view().items(.@"0")[func.zir_body_inst_extra_index]); } /// Returns a pointer that becomes invalid after any additions to the `InternPool`. pub fn branchQuota(func: *const Func, ip: *const InternPool) *u32 { - return &ip.extra.items[func.branch_quota_extra_index]; + const extra = ip.getLocalShared(func.tid).extra.acquire(); + return &extra.view().items(.@"0")[func.branch_quota_extra_index]; } /// Returns a pointer that becomes invalid after any additions to the `InternPool`. pub fn resolvedErrorSet(func: *const Func, ip: *const InternPool) *Index { + const extra = ip.getLocalShared(func.tid).extra.acquire(); assert(func.analysis(ip).inferred_error_set); - return @ptrCast(&ip.extra.items[func.resolved_error_set_extra_index]); + return @ptrCast(&extra.view().items(.@"0")[func.resolved_error_set_extra_index]); } }; @@ -2186,6 +2261,7 @@ pub const RequiresComptime = enum(u2) { no, yes, unknown, wip }; // minimal hashmap key, this type is a convenience type that contains info // needed by semantic analysis. pub const LoadedUnionType = struct { + tid: Zcu.PerThread.Id, /// The index of the `Tag.TypeUnion` payload. extra_index: u32, /// The Decl that corresponds to the union itself. @@ -2258,7 +2334,7 @@ pub const LoadedUnionType = struct { } }; - pub fn loadTagType(self: LoadedUnionType, ip: *InternPool) LoadedEnumType { + pub fn loadTagType(self: LoadedUnionType, ip: *const InternPool) LoadedEnumType { return ip.loadEnumType(self.enum_tag_ty); } @@ -2271,26 +2347,30 @@ pub const LoadedUnionType = struct { /// when it is mutated, the mutations are observed. 
/// The returned pointer expires with any addition to the `InternPool`. pub fn tagTypePtr(self: LoadedUnionType, ip: *const InternPool) *Index { + const extra = ip.getLocalShared(self.tid).extra.acquire(); const field_index = std.meta.fieldIndex(Tag.TypeUnion, "tag_ty").?; - return @ptrCast(&ip.extra.items[self.extra_index + field_index]); + return @ptrCast(&extra.view().items(.@"0")[self.extra_index + field_index]); } /// The returned pointer expires with any addition to the `InternPool`. pub fn flagsPtr(self: LoadedUnionType, ip: *const InternPool) *Tag.TypeUnion.Flags { + const extra = ip.getLocalShared(self.tid).extra.acquire(); const field_index = std.meta.fieldIndex(Tag.TypeUnion, "flags").?; - return @ptrCast(&ip.extra.items[self.extra_index + field_index]); + return @ptrCast(&extra.view().items(.@"0")[self.extra_index + field_index]); } /// The returned pointer expires with any addition to the `InternPool`. pub fn size(self: LoadedUnionType, ip: *const InternPool) *u32 { + const extra = ip.getLocalShared(self.tid).extra.acquire(); const field_index = std.meta.fieldIndex(Tag.TypeUnion, "size").?; - return &ip.extra.items[self.extra_index + field_index]; + return &extra.view().items(.@"0")[self.extra_index + field_index]; } /// The returned pointer expires with any addition to the `InternPool`. pub fn padding(self: LoadedUnionType, ip: *const InternPool) *u32 { + const extra = ip.getLocalShared(self.tid).extra.acquire(); const field_index = std.meta.fieldIndex(Tag.TypeUnion, "padding").?; - return &ip.extra.items[self.extra_index + field_index]; + return &extra.view().items(.@"0")[self.extra_index + field_index]; } pub fn hasTag(self: LoadedUnionType, ip: *const InternPool) bool { @@ -2319,7 +2399,7 @@ pub const LoadedUnionType = struct { const flags_field_index = std.meta.fieldIndex(Tag.TypeUnion, "flags").?; const zir_index_field_index = std.meta.fieldIndex(Tag.TypeUnion, "zir_index").?; const ptr: *TrackedInst.Index.Optional = - @ptrCast(&ip.extra.items[self.flags_index - flags_field_index + zir_index_field_index]); + @ptrCast(&ip.extra_.items[self.flags_index - flags_field_index + zir_index_field_index]); ptr.* = new_zir_index; } @@ -2335,18 +2415,21 @@ pub const LoadedUnionType = struct { }; pub fn loadUnionType(ip: *const InternPool, index: Index) LoadedUnionType { - const data = index.getData(ip); - const type_union = ip.extraDataTrail(Tag.TypeUnion, data); + const unwrapped_index = index.unwrap(ip); + const extra_list = unwrapped_index.getExtra(ip); + const data = unwrapped_index.getData(ip); + const type_union = extraDataTrail(extra_list, Tag.TypeUnion, data); const fields_len = type_union.data.fields_len; var extra_index = type_union.end; const captures_len = if (type_union.data.flags.any_captures) c: { - const len = ip.extra.items[extra_index]; + const len = extra_list.view().items(.@"0")[extra_index]; extra_index += 1; break :c len; } else 0; const captures: CaptureValue.Slice = .{ + .tid = unwrapped_index.tid, .start = extra_index, .len = captures_len, }; @@ -2356,21 +2439,24 @@ pub fn loadUnionType(ip: *const InternPool, index: Index) LoadedUnionType { } const field_types: Index.Slice = .{ + .tid = unwrapped_index.tid, .start = extra_index, .len = fields_len, }; extra_index += fields_len; - const field_aligns: Alignment.Slice = if (type_union.data.flags.any_aligned_fields) a: { + const field_aligns = if (type_union.data.flags.any_aligned_fields) a: { const a: Alignment.Slice = .{ + .tid = unwrapped_index.tid, .start = extra_index, .len = fields_len, }; extra_index += 
std.math.divCeil(u32, fields_len, 4) catch unreachable; break :a a; - } else .{ .start = 0, .len = 0 }; + } else Alignment.Slice.empty; return .{ + .tid = unwrapped_index.tid, .extra_index = data, .decl = type_union.data.decl, .namespace = type_union.data.namespace, @@ -2383,6 +2469,7 @@ pub fn loadUnionType(ip: *const InternPool, index: Index) LoadedUnionType { } pub const LoadedStructType = struct { + tid: Zcu.PerThread.Id, /// The index of the `Tag.TypeStruct` or `Tag.TypeStructPacked` payload. extra_index: u32, /// The struct's owner Decl. `none` when the struct is `@TypeOf(.{})`. @@ -2404,12 +2491,16 @@ pub const LoadedStructType = struct { captures: CaptureValue.Slice, pub const ComptimeBits = struct { + tid: Zcu.PerThread.Id, start: u32, /// This is the number of u32 elements, not the number of struct fields. len: u32, + pub const empty: ComptimeBits = .{ .tid = .main, .start = 0, .len = 0 }; + pub fn get(this: ComptimeBits, ip: *const InternPool) []u32 { - return ip.extra.items[this.start..][0..this.len]; + const extra = ip.getLocalShared(this.tid).extra.acquire(); + return extra.view().items(.@"0")[this.start..][0..this.len]; } pub fn getBit(this: ComptimeBits, ip: *const InternPool, i: usize) bool { @@ -2427,11 +2518,15 @@ pub const LoadedStructType = struct { }; pub const Offsets = struct { + tid: Zcu.PerThread.Id, start: u32, len: u32, + pub const empty: Offsets = .{ .tid = .main, .start = 0, .len = 0 }; + pub fn get(this: Offsets, ip: *const InternPool) []u32 { - return @ptrCast(ip.extra.items[this.start..][0..this.len]); + const extra = ip.getLocalShared(this.tid).extra.acquire(); + return @ptrCast(extra.view().items(.@"0")[this.start..][0..this.len]); } }; @@ -2443,11 +2538,15 @@ pub const LoadedStructType = struct { _, pub const Slice = struct { + tid: Zcu.PerThread.Id, start: u32, len: u32, + pub const empty: Slice = .{ .tid = .main, .start = 0, .len = 0 }; + pub fn get(slice: RuntimeOrder.Slice, ip: *const InternPool) []RuntimeOrder { - return @ptrCast(ip.extra.items[slice.start..][0..slice.len]); + const extra = ip.getLocalShared(slice.tid).extra.acquire(); + return @ptrCast(extra.view().items(.@"0")[slice.start..][0..slice.len]); } }; @@ -2479,7 +2578,8 @@ pub const LoadedStructType = struct { ip: *InternPool, name: NullTerminatedString, ) ?u32 { - return ip.addFieldName(self.names_map.unwrap().?, self.field_names.start, name); + const extra = ip.getLocalShared(self.tid).extra.acquire(); + return ip.addFieldName(extra, self.names_map.unwrap().?, self.field_names.start, name); } pub fn fieldAlign(s: LoadedStructType, ip: *const InternPool, i: usize) Alignment { @@ -2487,7 +2587,7 @@ pub const LoadedStructType = struct { return s.field_aligns.get(ip)[i]; } - pub fn fieldInit(s: LoadedStructType, ip: *const InternPool, i: usize) Index { + pub fn fieldInit(s: LoadedStructType, ip: *InternPool, i: usize) Index { if (s.field_inits.len == 0) return .none; assert(s.haveFieldInits(ip)); return s.field_inits.get(ip)[i]; @@ -2518,18 +2618,20 @@ pub const LoadedStructType = struct { /// The returned pointer expires with any addition to the `InternPool`. /// Asserts the struct is not packed. 
- pub fn flagsPtr(self: LoadedStructType, ip: *const InternPool) *Tag.TypeStruct.Flags { + pub fn flagsPtr(self: LoadedStructType, ip: *InternPool) *Tag.TypeStruct.Flags { assert(self.layout != .@"packed"); + const extra = ip.getLocalShared(self.tid).extra.acquire(); const flags_field_index = std.meta.fieldIndex(Tag.TypeStruct, "flags").?; - return @ptrCast(&ip.extra.items[self.extra_index + flags_field_index]); + return @ptrCast(&extra.view().items(.@"0")[self.extra_index + flags_field_index]); } /// The returned pointer expires with any addition to the `InternPool`. /// Asserts that the struct is packed. - pub fn packedFlagsPtr(self: LoadedStructType, ip: *const InternPool) *Tag.TypeStructPacked.Flags { + pub fn packedFlagsPtr(self: LoadedStructType, ip: *InternPool) *Tag.TypeStructPacked.Flags { assert(self.layout == .@"packed"); + const extra = ip.getLocalShared(self.tid).extra.acquire(); const flags_field_index = std.meta.fieldIndex(Tag.TypeStructPacked, "flags").?; - return @ptrCast(&ip.extra.items[self.extra_index + flags_field_index]); + return @ptrCast(&extra.view().items(.@"0")[self.extra_index + flags_field_index]); } pub fn assumeRuntimeBitsIfFieldTypesWip(s: LoadedStructType, ip: *InternPool) bool { @@ -2621,25 +2723,27 @@ pub const LoadedStructType = struct { /// Asserts the struct is not packed. pub fn size(self: LoadedStructType, ip: *InternPool) *u32 { assert(self.layout != .@"packed"); + const extra = ip.getLocalShared(self.tid).extra.acquire(); const size_field_index = std.meta.fieldIndex(Tag.TypeStruct, "size").?; - return @ptrCast(&ip.extra.items[self.extra_index + size_field_index]); + return @ptrCast(&extra.view().items(.@"0")[self.extra_index + size_field_index]); } /// The backing integer type of the packed struct. Whether zig chooses /// this type or the user specifies it, it is stored here. This will be /// set to `none` until the layout is resolved. /// Asserts the struct is packed. - pub fn backingIntType(s: LoadedStructType, ip: *const InternPool) *Index { + pub fn backingIntType(s: LoadedStructType, ip: *InternPool) *Index { assert(s.layout == .@"packed"); + const extra = ip.getLocalShared(s.tid).extra.acquire(); const field_index = std.meta.fieldIndex(Tag.TypeStructPacked, "backing_int_ty").?; - return @ptrCast(&ip.extra.items[s.extra_index + field_index]); + return @ptrCast(&extra.view().items(.@"0")[s.extra_index + field_index]); } /// Asserts the struct is not packed. 
pub fn setZirIndex(s: LoadedStructType, ip: *InternPool, new_zir_index: TrackedInst.Index.Optional) void { assert(s.layout != .@"packed"); const field_index = std.meta.fieldIndex(Tag.TypeStruct, "zir_index").?; - ip.extra.items[s.extra_index + field_index] = @intFromEnum(new_zir_index); + ip.extra_.items[s.extra_index + field_index] = @intFromEnum(new_zir_index); } pub fn haveFieldTypes(s: LoadedStructType, ip: *const InternPool) bool { @@ -2647,7 +2751,7 @@ pub const LoadedStructType = struct { return types.len == 0 or types[0] != .none; } - pub fn haveFieldInits(s: LoadedStructType, ip: *const InternPool) bool { + pub fn haveFieldInits(s: LoadedStructType, ip: *InternPool) bool { return switch (s.layout) { .@"packed" => s.packedFlagsPtr(ip).inits_resolved, .auto, .@"extern" => s.flagsPtr(ip).inits_resolved, @@ -2757,34 +2861,38 @@ pub const LoadedStructType = struct { }; pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType { - const item = index.getItem(ip); + const unwrapped_index = index.unwrap(ip); + const extra_list = unwrapped_index.getExtra(ip); + const item = unwrapped_index.getItem(ip); switch (item.tag) { .type_struct => { if (item.data == 0) return .{ + .tid = .main, .extra_index = 0, .decl = .none, .namespace = .none, .zir_index = .none, .layout = .auto, - .field_names = .{ .start = 0, .len = 0 }, - .field_types = .{ .start = 0, .len = 0 }, - .field_inits = .{ .start = 0, .len = 0 }, - .field_aligns = .{ .start = 0, .len = 0 }, - .runtime_order = .{ .start = 0, .len = 0 }, - .comptime_bits = .{ .start = 0, .len = 0 }, - .offsets = .{ .start = 0, .len = 0 }, + .field_names = NullTerminatedString.Slice.empty, + .field_types = Index.Slice.empty, + .field_inits = Index.Slice.empty, + .field_aligns = Alignment.Slice.empty, + .runtime_order = LoadedStructType.RuntimeOrder.Slice.empty, + .comptime_bits = LoadedStructType.ComptimeBits.empty, + .offsets = LoadedStructType.Offsets.empty, .names_map = .none, - .captures = .{ .start = 0, .len = 0 }, + .captures = CaptureValue.Slice.empty, }; - const extra = ip.extraDataTrail(Tag.TypeStruct, item.data); + const extra = extraDataTrail(extra_list, Tag.TypeStruct, item.data); const fields_len = extra.data.fields_len; var extra_index = extra.end; const captures_len = if (extra.data.flags.any_captures) c: { - const len = ip.extra.items[extra_index]; + const len = extra_list.view().items(.@"0")[extra_index]; extra_index += 1; break :c len; } else 0; const captures: CaptureValue.Slice = .{ + .tid = unwrapped_index.tid, .start = extra_index, .len = captures_len, }; @@ -2793,49 +2901,75 @@ pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType { extra_index += 2; // PackedU64 } const field_types: Index.Slice = .{ + .tid = unwrapped_index.tid, .start = extra_index, .len = fields_len, }; extra_index += fields_len; - const names_map: OptionalMapIndex, const names: NullTerminatedString.Slice = if (!extra.data.flags.is_tuple) n: { - const names_map: OptionalMapIndex = @enumFromInt(ip.extra.items[extra_index]); + const names_map: OptionalMapIndex, const names = if (!extra.data.flags.is_tuple) n: { + const names_map: OptionalMapIndex = @enumFromInt(extra_list.view().items(.@"0")[extra_index]); extra_index += 1; - const names: NullTerminatedString.Slice = .{ .start = extra_index, .len = fields_len }; + const names: NullTerminatedString.Slice = .{ + .tid = unwrapped_index.tid, + .start = extra_index, + .len = fields_len, + }; extra_index += fields_len; break :n .{ names_map, names }; - } else .{ .none, .{ .start = 
0, .len = 0 } }; + } else .{ .none, NullTerminatedString.Slice.empty }; const inits: Index.Slice = if (extra.data.flags.any_default_inits) i: { - const inits: Index.Slice = .{ .start = extra_index, .len = fields_len }; + const inits: Index.Slice = .{ + .tid = unwrapped_index.tid, + .start = extra_index, + .len = fields_len, + }; extra_index += fields_len; break :i inits; - } else .{ .start = 0, .len = 0 }; + } else Index.Slice.empty; const namespace: OptionalNamespaceIndex = if (extra.data.flags.has_namespace) n: { - const n: NamespaceIndex = @enumFromInt(ip.extra.items[extra_index]); + const n: NamespaceIndex = @enumFromInt(extra_list.view().items(.@"0")[extra_index]); extra_index += 1; break :n n.toOptional(); } else .none; const aligns: Alignment.Slice = if (extra.data.flags.any_aligned_fields) a: { - const a: Alignment.Slice = .{ .start = extra_index, .len = fields_len }; + const a: Alignment.Slice = .{ + .tid = unwrapped_index.tid, + .start = extra_index, + .len = fields_len, + }; extra_index += std.math.divCeil(u32, fields_len, 4) catch unreachable; break :a a; - } else .{ .start = 0, .len = 0 }; + } else Alignment.Slice.empty; const comptime_bits: LoadedStructType.ComptimeBits = if (extra.data.flags.any_comptime_fields) c: { const len = std.math.divCeil(u32, fields_len, 32) catch unreachable; - const c: LoadedStructType.ComptimeBits = .{ .start = extra_index, .len = len }; + const c: LoadedStructType.ComptimeBits = .{ + .tid = unwrapped_index.tid, + .start = extra_index, + .len = len, + }; extra_index += len; break :c c; - } else .{ .start = 0, .len = 0 }; + } else LoadedStructType.ComptimeBits.empty; const runtime_order: LoadedStructType.RuntimeOrder.Slice = if (!extra.data.flags.is_extern) ro: { - const ro: LoadedStructType.RuntimeOrder.Slice = .{ .start = extra_index, .len = fields_len }; + const ro: LoadedStructType.RuntimeOrder.Slice = .{ + .tid = unwrapped_index.tid, + .start = extra_index, + .len = fields_len, + }; extra_index += fields_len; break :ro ro; - } else .{ .start = 0, .len = 0 }; + } else LoadedStructType.RuntimeOrder.Slice.empty; const offsets: LoadedStructType.Offsets = o: { - const o: LoadedStructType.Offsets = .{ .start = extra_index, .len = fields_len }; + const o: LoadedStructType.Offsets = .{ + .tid = unwrapped_index.tid, + .start = extra_index, + .len = fields_len, + }; extra_index += fields_len; break :o o; }; return .{ + .tid = unwrapped_index.tid, .extra_index = item.data, .decl = extra.data.decl.toOptional(), .namespace = namespace, @@ -2853,16 +2987,17 @@ pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType { }; }, .type_struct_packed, .type_struct_packed_inits => { - const extra = ip.extraDataTrail(Tag.TypeStructPacked, item.data); + const extra = extraDataTrail(extra_list, Tag.TypeStructPacked, item.data); const has_inits = item.tag == .type_struct_packed_inits; const fields_len = extra.data.fields_len; var extra_index = extra.end; const captures_len = if (extra.data.flags.any_captures) c: { - const len = ip.extra.items[extra_index]; + const len = extra_list.view().items(.@"0")[extra_index]; extra_index += 1; break :c len; } else 0; const captures: CaptureValue.Slice = .{ + .tid = unwrapped_index.tid, .start = extra_index, .len = captures_len, }; @@ -2871,24 +3006,28 @@ pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType { extra_index += 2; // PackedU64 } const field_types: Index.Slice = .{ + .tid = unwrapped_index.tid, .start = extra_index, .len = fields_len, }; extra_index += fields_len; const 
field_names: NullTerminatedString.Slice = .{ + .tid = unwrapped_index.tid, .start = extra_index, .len = fields_len, }; extra_index += fields_len; const field_inits: Index.Slice = if (has_inits) inits: { const i: Index.Slice = .{ + .tid = unwrapped_index.tid, .start = extra_index, .len = fields_len, }; extra_index += fields_len; break :inits i; - } else .{ .start = 0, .len = 0 }; + } else Index.Slice.empty; return .{ + .tid = unwrapped_index.tid, .extra_index = item.data, .decl = extra.data.decl.toOptional(), .namespace = extra.data.namespace, @@ -2897,10 +3036,10 @@ pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType { .field_names = field_names, .field_types = field_types, .field_inits = field_inits, - .field_aligns = .{ .start = 0, .len = 0 }, - .runtime_order = .{ .start = 0, .len = 0 }, - .comptime_bits = .{ .start = 0, .len = 0 }, - .offsets = .{ .start = 0, .len = 0 }, + .field_aligns = Alignment.Slice.empty, + .runtime_order = LoadedStructType.RuntimeOrder.Slice.empty, + .comptime_bits = LoadedStructType.ComptimeBits.empty, + .offsets = LoadedStructType.Offsets.empty, .names_map = extra.data.names_map.toOptional(), .captures = captures, }; @@ -2981,10 +3120,12 @@ const LoadedEnumType = struct { }; pub fn loadEnumType(ip: *const InternPool, index: Index) LoadedEnumType { - const item = index.getItem(ip); + const unwrapped_index = index.unwrap(ip); + const extra_list = unwrapped_index.getExtra(ip); + const item = unwrapped_index.getItem(ip); const tag_mode: LoadedEnumType.TagMode = switch (item.tag) { .type_enum_auto => { - const extra = ip.extraDataTrail(EnumAuto, item.data); + const extra = extraDataTrail(extra_list, EnumAuto, item.data); var extra_index: u32 = @intCast(extra.end); if (extra.data.zir_index == .none) { extra_index += 1; // owner_union @@ -2998,15 +3139,17 @@ pub fn loadEnumType(ip: *const InternPool, index: Index) LoadedEnumType { .namespace = extra.data.namespace, .tag_ty = extra.data.int_tag_type, .names = .{ + .tid = unwrapped_index.tid, .start = extra_index + captures_len, .len = extra.data.fields_len, }, - .values = .{ .start = 0, .len = 0 }, + .values = Index.Slice.empty, .tag_mode = .auto, .names_map = extra.data.names_map, .values_map = .none, .zir_index = extra.data.zir_index, .captures = .{ + .tid = unwrapped_index.tid, .start = extra_index, .len = captures_len, }, @@ -3016,7 +3159,7 @@ pub fn loadEnumType(ip: *const InternPool, index: Index) LoadedEnumType { .type_enum_nonexhaustive => .nonexhaustive, else => unreachable, }; - const extra = ip.extraDataTrail(EnumExplicit, item.data); + const extra = extraDataTrail(extra_list, EnumExplicit, item.data); var extra_index: u32 = @intCast(extra.end); if (extra.data.zir_index == .none) { extra_index += 1; // owner_union @@ -3030,10 +3173,12 @@ pub fn loadEnumType(ip: *const InternPool, index: Index) LoadedEnumType { .namespace = extra.data.namespace, .tag_ty = extra.data.int_tag_type, .names = .{ + .tid = unwrapped_index.tid, .start = extra_index + captures_len, .len = extra.data.fields_len, }, .values = .{ + .tid = unwrapped_index.tid, .start = extra_index + captures_len + extra.data.fields_len, .len = if (extra.data.values_map != .none) extra.data.fields_len else 0, }, @@ -3042,6 +3187,7 @@ pub fn loadEnumType(ip: *const InternPool, index: Index) LoadedEnumType { .values_map = extra.data.values_map, .zir_index = extra.data.zir_index, .captures = .{ + .tid = unwrapped_index.tid, .start = extra_index, .len = captures_len, }, @@ -3060,9 +3206,10 @@ pub const LoadedOpaqueType = struct { }; 
pub fn loadOpaqueType(ip: *const InternPool, index: Index) LoadedOpaqueType { - const item = index.getItem(ip); + const unwrapped_index = index.unwrap(ip); + const item = unwrapped_index.getItem(ip); assert(item.tag == .type_opaque); - const extra = ip.extraDataTrail(Tag.TypeOpaque, item.data); + const extra = extraDataTrail(unwrapped_index.getExtra(ip), Tag.TypeOpaque, item.data); const captures_len = if (extra.data.captures_len == std.math.maxInt(u32)) 0 else @@ -3072,6 +3219,7 @@ pub fn loadOpaqueType(ip: *const InternPool, index: Index) LoadedOpaqueType { .namespace = extra.data.namespace, .zir_index = extra.data.zir_index, .captures = .{ + .tid = unwrapped_index.tid, .start = extra.end, .len = captures_len, }, @@ -3214,11 +3362,15 @@ pub const Index = enum(u32) { /// This type exists to provide a struct with lifetime that is /// not invalidated when items are added to the `InternPool`. pub const Slice = struct { + tid: Zcu.PerThread.Id, start: u32, len: u32, + pub const empty: Slice = .{ .tid = .main, .start = 0, .len = 0 }; + pub fn get(slice: Slice, ip: *const InternPool) []Index { - return @ptrCast(ip.extra.items[slice.start..][0..slice.len]); + const extra = ip.getLocalShared(slice.tid).extra.acquire(); + return @ptrCast(extra.view().items(.@"0")[slice.start..][0..slice.len]); } }; @@ -3237,37 +3389,6 @@ pub const Index = enum(u32) { } }; - pub fn getItem(index: Index, ip: *const InternPool) Item { - const item_ptr = index.itemPtr(ip); - const tag = @atomicLoad(Tag, item_ptr.tag_ptr, .acquire); - return .{ .tag = tag, .data = item_ptr.data_ptr.* }; - } - - pub fn getTag(index: Index, ip: *const InternPool) Tag { - const item_ptr = index.itemPtr(ip); - return @atomicLoad(Tag, item_ptr.tag_ptr, .acquire); - } - - pub fn getData(index: Index, ip: *const InternPool) u32 { - return index.getItem(ip).data; - } - - const ItemPtr = struct { - tag_ptr: *Tag, - data_ptr: *u32, - }; - fn itemPtr(index: Index, ip: *const InternPool) ItemPtr { - const unwrapped: Unwrapped = if (single_threaded) .{ - .tid = .main, - .index = @intFromEnum(index), - } else index.unwrap(ip); - const slice = ip.getLocalShared(unwrapped.tid).items.acquire().view().slice(); - return .{ - .tag_ptr = &slice.items(.tag)[unwrapped.index], - .data_ptr = &slice.items(.data)[unwrapped.index], - }; - } - const Unwrapped = struct { tid: Zcu.PerThread.Id, index: u32, @@ -3277,9 +3398,43 @@ pub const Index = enum(u32) { assert(unwrapped.index <= ip.getIndexMask(u31)); return @enumFromInt(@intFromEnum(unwrapped.tid) << ip.tid_shift_31 | unwrapped.index); } + + pub fn getExtra(unwrapped: Unwrapped, ip: *const InternPool) Local.Extra { + return ip.getLocalShared(unwrapped.tid).extra.acquire(); + } + + pub fn getItem(unwrapped: Unwrapped, ip: *const InternPool) Item { + const item_ptr = unwrapped.itemPtr(ip); + const tag = @atomicLoad(Tag, item_ptr.tag_ptr, .acquire); + return .{ .tag = tag, .data = item_ptr.data_ptr.* }; + } + + pub fn getTag(unwrapped: Unwrapped, ip: *const InternPool) Tag { + const item_ptr = unwrapped.itemPtr(ip); + return @atomicLoad(Tag, item_ptr.tag_ptr, .acquire); + } + + pub fn getData(unwrapped: Unwrapped, ip: *const InternPool) u32 { + return unwrapped.getItem(ip).data; + } + + const ItemPtr = struct { + tag_ptr: *Tag, + data_ptr: *u32, + }; + fn itemPtr(unwrapped: Unwrapped, ip: *const InternPool) ItemPtr { + const slice = ip.getLocalShared(unwrapped.tid).items.acquire().view().slice(); + return .{ + .tag_ptr = &slice.items(.tag)[unwrapped.index], + .data_ptr = &slice.items(.data)[unwrapped.index], + }; 
+ } }; - fn unwrap(index: Index, ip: *const InternPool) Unwrapped { - return .{ + pub fn unwrap(index: Index, ip: *const InternPool) Unwrapped { + return if (single_threaded) .{ + .tid = .main, + .index = @intFromEnum(index), + } else .{ .tid = @enumFromInt(@intFromEnum(index) >> ip.tid_shift_31 & ip.getTidMask()), .index = @intFromEnum(index) & ip.getIndexMask(u31), }; @@ -3643,9 +3798,9 @@ pub const static_keys = [_]Key{ // empty_struct_type .{ .anon_struct_type = .{ - .types = .{ .start = 0, .len = 0 }, - .names = .{ .start = 0, .len = 0 }, - .values = .{ .start = 0, .len = 0 }, + .types = Index.Slice.empty, + .names = NullTerminatedString.Slice.empty, + .values = Index.Slice.empty, } }, .{ .simple_value = .undefined }, @@ -4563,14 +4718,18 @@ pub const Alignment = enum(u6) { /// This type exists to provide a struct with lifetime that is /// not invalidated when items are added to the `InternPool`. pub const Slice = struct { + tid: Zcu.PerThread.Id, start: u32, /// This is the number of alignment values, not the number of u32 elements. len: u32, + pub const empty: Slice = .{ .tid = .main, .start = 0, .len = 0 }; + pub fn get(slice: Slice, ip: *const InternPool) []Alignment { // TODO: implement @ptrCast between slices changing the length - //const bytes: []u8 = @ptrCast(ip.extra.items[slice.start..]); - const bytes: []u8 = std.mem.sliceAsBytes(ip.extra.items[slice.start..]); + const extra = ip.getLocalShared(slice.tid).extra.acquire(); + //const bytes: []u8 = @ptrCast(extra.view().items(.@"0")[slice.start..]); + const bytes: []u8 = std.mem.sliceAsBytes(extra.view().items(.@"0")[slice.start..]); return @ptrCast(bytes[0..slice.len]); } }; @@ -4837,9 +4996,11 @@ pub const PtrSlice = struct { }; /// Trailing: Limb for every limbs_len -pub const Int = struct { +pub const Int = packed struct { ty: Index, limbs_len: u32, + + const limbs_items_len = @divExact(@sizeOf(Int), @sizeOf(Limb)); }; pub const IntSmall = struct { @@ -4931,17 +5092,22 @@ pub const MemoizedCall = struct { pub fn init(ip: *InternPool, gpa: Allocator, available_threads: usize) !void { errdefer ip.deinit(gpa); assert(ip.locals.len == 0 and ip.shards.len == 0); + assert(available_threads > 0 and available_threads <= std.math.maxInt(u8)); const used_threads = if (single_threaded) 1 else available_threads; ip.locals = try gpa.alloc(Local, used_threads); @memset(ip.locals, .{ .shared = .{ .items = Local.List(Item).empty, + .extra = Local.Extra.empty, + .limbs = Local.Limbs.empty, .strings = Local.Strings.empty, }, .mutate = .{ .arena = .{}, .items = Local.Mutate.empty, + .extra = Local.Mutate.empty, + .limbs = Local.Mutate.empty, .strings = Local.Mutate.empty, }, }); @@ -4995,9 +5161,6 @@ pub fn init(ip: *InternPool, gpa: Allocator, available_threads: usize) !void { } pub fn deinit(ip: *InternPool, gpa: Allocator) void { - ip.extra.deinit(gpa); - ip.limbs.deinit(gpa); - ip.decls_free_list.deinit(gpa); ip.allocated_decls.deinit(gpa); @@ -5031,7 +5194,8 @@ pub fn deinit(ip: *InternPool, gpa: Allocator) void { pub fn indexToKey(ip: *const InternPool, index: Index) Key { assert(index != .none); - const item = index.getItem(ip); + const unwrapped_index = index.unwrap(ip); + const item = unwrapped_index.getItem(ip); const data = item.data; return switch (item.tag) { .removed => unreachable, @@ -5048,7 +5212,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { }, }, .type_array_big => { - const array_info = ip.extraData(Array, data); + const array_info = extraData(unwrapped_index.getExtra(ip), Array, data); return .{ 
.array_type = .{ .len = array_info.getLength(), .child = array_info.child, @@ -5056,7 +5220,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { } }; }, .type_array_small => { - const array_info = ip.extraData(Vector, data); + const array_info = extraData(unwrapped_index.getExtra(ip), Vector, data); return .{ .array_type = .{ .len = array_info.len, .child = array_info.child, @@ -5067,20 +5231,21 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { .simple_value => .{ .simple_value = @enumFromInt(@intFromEnum(index)) }, .type_vector => { - const vector_info = ip.extraData(Vector, data); + const vector_info = extraData(unwrapped_index.getExtra(ip), Vector, data); return .{ .vector_type = .{ .len = vector_info.len, .child = vector_info.child, } }; }, - .type_pointer => .{ .ptr_type = ip.extraData(Tag.TypePointer, data) }, + .type_pointer => .{ .ptr_type = extraData(unwrapped_index.getExtra(ip), Tag.TypePointer, data) }, .type_slice => { const many_ptr_index: Index = @enumFromInt(data); - const many_ptr_item = many_ptr_index.getItem(ip); + const many_ptr_unwrapped = many_ptr_index.unwrap(ip); + const many_ptr_item = many_ptr_unwrapped.getItem(ip); assert(many_ptr_item.tag == .type_pointer); - var ptr_info = ip.extraData(Tag.TypePointer, many_ptr_item.data); + var ptr_info = extraData(many_ptr_unwrapped.getExtra(ip), Tag.TypePointer, many_ptr_item.data); ptr_info.flags.size = .Slice; return .{ .ptr_type = ptr_info }; }, @@ -5088,18 +5253,18 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { .type_optional => .{ .opt_type = @enumFromInt(data) }, .type_anyframe => .{ .anyframe_type = @enumFromInt(data) }, - .type_error_union => .{ .error_union_type = ip.extraData(Key.ErrorUnionType, data) }, + .type_error_union => .{ .error_union_type = extraData(unwrapped_index.getExtra(ip), Key.ErrorUnionType, data) }, .type_anyerror_union => .{ .error_union_type = .{ .error_set_type = .anyerror_type, .payload_type = @enumFromInt(data), } }, - .type_error_set => .{ .error_set_type = ip.extraErrorSet(data) }, + .type_error_set => .{ .error_set_type = extraErrorSet(unwrapped_index.tid, unwrapped_index.getExtra(ip), data) }, .type_inferred_error_set => .{ .inferred_error_set_type = @enumFromInt(data), }, .type_opaque => .{ .opaque_type = ns: { - const extra = ip.extraDataTrail(Tag.TypeOpaque, data); + const extra = extraDataTrail(unwrapped_index.getExtra(ip), Tag.TypeOpaque, data); if (extra.data.captures_len == std.math.maxInt(u32)) { break :ns .{ .reified = .{ .zir_index = extra.data.zir_index, @@ -5109,6 +5274,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { break :ns .{ .declared = .{ .zir_index = extra.data.zir_index, .captures = .{ .owned = .{ + .tid = unwrapped_index.tid, .start = extra.end, .len = extra.data.captures_len, } }, @@ -5117,105 +5283,115 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { .type_struct => .{ .struct_type = ns: { if (data == 0) break :ns .empty_struct; - const extra = ip.extraDataTrail(Tag.TypeStruct, data); + const extra_list = unwrapped_index.getExtra(ip); + const extra = extraDataTrail(extra_list, Tag.TypeStruct, data); if (extra.data.flags.is_reified) { assert(!extra.data.flags.any_captures); break :ns .{ .reified = .{ .zir_index = extra.data.zir_index, - .type_hash = ip.extraData(PackedU64, extra.end).get(), + .type_hash = extraData(extra_list, PackedU64, extra.end).get(), } }; } break :ns .{ .declared = .{ .zir_index = extra.data.zir_index, .captures = .{ .owned = if (extra.data.flags.any_captures) .{ + .tid 
= unwrapped_index.tid, .start = extra.end + 1, - .len = ip.extra.items[extra.end], - } else .{ .start = 0, .len = 0 } }, + .len = extra_list.view().items(.@"0")[extra.end], + } else CaptureValue.Slice.empty }, } }; } }, .type_struct_packed, .type_struct_packed_inits => .{ .struct_type = ns: { - const extra = ip.extraDataTrail(Tag.TypeStructPacked, data); + const extra_list = unwrapped_index.getExtra(ip); + const extra = extraDataTrail(extra_list, Tag.TypeStructPacked, data); if (extra.data.flags.is_reified) { assert(!extra.data.flags.any_captures); break :ns .{ .reified = .{ .zir_index = extra.data.zir_index, - .type_hash = ip.extraData(PackedU64, extra.end).get(), + .type_hash = extraData(extra_list, PackedU64, extra.end).get(), } }; } break :ns .{ .declared = .{ .zir_index = extra.data.zir_index, .captures = .{ .owned = if (extra.data.flags.any_captures) .{ + .tid = unwrapped_index.tid, .start = extra.end + 1, - .len = ip.extra.items[extra.end], - } else .{ .start = 0, .len = 0 } }, + .len = extra_list.view().items(.@"0")[extra.end], + } else CaptureValue.Slice.empty }, } }; } }, - .type_struct_anon => .{ .anon_struct_type = extraTypeStructAnon(ip, data) }, - .type_tuple_anon => .{ .anon_struct_type = extraTypeTupleAnon(ip, data) }, + .type_struct_anon => .{ .anon_struct_type = extraTypeStructAnon(unwrapped_index.tid, unwrapped_index.getExtra(ip), data) }, + .type_tuple_anon => .{ .anon_struct_type = extraTypeTupleAnon(unwrapped_index.tid, unwrapped_index.getExtra(ip), data) }, .type_union => .{ .union_type = ns: { - const extra = ip.extraDataTrail(Tag.TypeUnion, data); + const extra_list = unwrapped_index.getExtra(ip); + const extra = extraDataTrail(extra_list, Tag.TypeUnion, data); if (extra.data.flags.is_reified) { assert(!extra.data.flags.any_captures); break :ns .{ .reified = .{ .zir_index = extra.data.zir_index, - .type_hash = ip.extraData(PackedU64, extra.end).get(), + .type_hash = extraData(extra_list, PackedU64, extra.end).get(), } }; } break :ns .{ .declared = .{ .zir_index = extra.data.zir_index, .captures = .{ .owned = if (extra.data.flags.any_captures) .{ + .tid = unwrapped_index.tid, .start = extra.end + 1, - .len = ip.extra.items[extra.end], - } else .{ .start = 0, .len = 0 } }, + .len = extra_list.view().items(.@"0")[extra.end], + } else CaptureValue.Slice.empty }, } }; } }, .type_enum_auto => .{ .enum_type = ns: { - const extra = ip.extraDataTrail(EnumAuto, data); + const extra_list = unwrapped_index.getExtra(ip); + const extra = extraDataTrail(extra_list, EnumAuto, data); const zir_index = extra.data.zir_index.unwrap() orelse { assert(extra.data.captures_len == 0); break :ns .{ .generated_tag = .{ - .union_type = @enumFromInt(ip.extra.items[extra.end]), + .union_type = @enumFromInt(extra_list.view().items(.@"0")[extra.end]), } }; }; if (extra.data.captures_len == std.math.maxInt(u32)) { break :ns .{ .reified = .{ .zir_index = zir_index, - .type_hash = ip.extraData(PackedU64, extra.end).get(), + .type_hash = extraData(extra_list, PackedU64, extra.end).get(), } }; } break :ns .{ .declared = .{ .zir_index = zir_index, .captures = .{ .owned = .{ + .tid = unwrapped_index.tid, .start = extra.end, .len = extra.data.captures_len, } }, } }; } }, .type_enum_explicit, .type_enum_nonexhaustive => .{ .enum_type = ns: { - const extra = ip.extraDataTrail(EnumExplicit, data); + const extra_list = unwrapped_index.getExtra(ip); + const extra = extraDataTrail(extra_list, EnumExplicit, data); const zir_index = extra.data.zir_index.unwrap() orelse { assert(extra.data.captures_len == 0); 
break :ns .{ .generated_tag = .{ - .union_type = @enumFromInt(ip.extra.items[extra.end]), + .union_type = @enumFromInt(extra_list.view().items(.@"0")[extra.end]), } }; }; if (extra.data.captures_len == std.math.maxInt(u32)) { break :ns .{ .reified = .{ .zir_index = zir_index, - .type_hash = ip.extraData(PackedU64, extra.end).get(), + .type_hash = extraData(extra_list, PackedU64, extra.end).get(), } }; } break :ns .{ .declared = .{ .zir_index = zir_index, .captures = .{ .owned = .{ + .tid = unwrapped_index.tid, .start = extra.end, .len = extra.data.captures_len, } }, } }; } }, - .type_function => .{ .func_type = ip.extraFuncType(data) }, + .type_function => .{ .func_type = extraFuncType(unwrapped_index.tid, unwrapped_index.getExtra(ip), data) }, .undef => .{ .undef = @enumFromInt(data) }, .opt_null => .{ .opt = .{ @@ -5223,40 +5399,40 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { .val = .none, } }, .opt_payload => { - const extra = ip.extraData(Tag.TypeValue, data); + const extra = extraData(unwrapped_index.getExtra(ip), Tag.TypeValue, data); return .{ .opt = .{ .ty = extra.ty, .val = extra.val, } }; }, .ptr_decl => { - const info = ip.extraData(PtrDecl, data); + const info = extraData(unwrapped_index.getExtra(ip), PtrDecl, data); return .{ .ptr = .{ .ty = info.ty, .base_addr = .{ .decl = info.decl }, .byte_offset = info.byteOffset() } }; }, .ptr_comptime_alloc => { - const info = ip.extraData(PtrComptimeAlloc, data); + const info = extraData(unwrapped_index.getExtra(ip), PtrComptimeAlloc, data); return .{ .ptr = .{ .ty = info.ty, .base_addr = .{ .comptime_alloc = info.index }, .byte_offset = info.byteOffset() } }; }, .ptr_anon_decl => { - const info = ip.extraData(PtrAnonDecl, data); + const info = extraData(unwrapped_index.getExtra(ip), PtrAnonDecl, data); return .{ .ptr = .{ .ty = info.ty, .base_addr = .{ .anon_decl = .{ .val = info.val, .orig_ty = info.ty, } }, .byte_offset = info.byteOffset() } }; }, .ptr_anon_decl_aligned => { - const info = ip.extraData(PtrAnonDeclAligned, data); + const info = extraData(unwrapped_index.getExtra(ip), PtrAnonDeclAligned, data); return .{ .ptr = .{ .ty = info.ty, .base_addr = .{ .anon_decl = .{ .val = info.val, .orig_ty = info.orig_ty, } }, .byte_offset = info.byteOffset() } }; }, .ptr_comptime_field => { - const info = ip.extraData(PtrComptimeField, data); + const info = extraData(unwrapped_index.getExtra(ip), PtrComptimeField, data); return .{ .ptr = .{ .ty = info.ty, .base_addr = .{ .comptime_field = info.field_val }, .byte_offset = info.byteOffset() } }; }, .ptr_int => { - const info = ip.extraData(PtrInt, data); + const info = extraData(unwrapped_index.getExtra(ip), PtrInt, data); return .{ .ptr = .{ .ty = info.ty, .base_addr = .int, @@ -5264,17 +5440,17 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { } }; }, .ptr_eu_payload => { - const info = ip.extraData(PtrBase, data); + const info = extraData(unwrapped_index.getExtra(ip), PtrBase, data); return .{ .ptr = .{ .ty = info.ty, .base_addr = .{ .eu_payload = info.base }, .byte_offset = info.byteOffset() } }; }, .ptr_opt_payload => { - const info = ip.extraData(PtrBase, data); + const info = extraData(unwrapped_index.getExtra(ip), PtrBase, data); return .{ .ptr = .{ .ty = info.ty, .base_addr = .{ .opt_payload = info.base }, .byte_offset = info.byteOffset() } }; }, .ptr_elem => { // Avoid `indexToKey` recursion by asserting the tag encoding. 
- const info = ip.extraData(PtrBaseIndex, data); - const index_item = info.index.getItem(ip); + const info = extraData(unwrapped_index.getExtra(ip), PtrBaseIndex, data); + const index_item = info.index.unwrap(ip).getItem(ip); return switch (index_item.tag) { .int_usize => .{ .ptr = .{ .ty = info.ty, .base_addr = .{ .arr_elem = .{ .base = info.base, @@ -5286,8 +5462,8 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { }, .ptr_field => { // Avoid `indexToKey` recursion by asserting the tag encoding. - const info = ip.extraData(PtrBaseIndex, data); - const index_item = info.index.getItem(ip); + const info = extraData(unwrapped_index.getExtra(ip), PtrBaseIndex, data); + const index_item = info.index.unwrap(ip).getItem(ip); return switch (index_item.tag) { .int_usize => .{ .ptr = .{ .ty = info.ty, .base_addr = .{ .field = .{ .base = info.base, @@ -5298,7 +5474,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { }; }, .ptr_slice => { - const info = ip.extraData(PtrSlice, data); + const info = extraData(unwrapped_index.getExtra(ip), PtrSlice, data); return .{ .slice = .{ .ty = info.ty, .ptr = info.ptr, @@ -5333,17 +5509,17 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { .ty = .comptime_int_type, .storage = .{ .i64 = @as(i32, @bitCast(data)) }, } }, - .int_positive => ip.indexToKeyBigInt(data, true), - .int_negative => ip.indexToKeyBigInt(data, false), + .int_positive => ip.indexToKeyBigInt(unwrapped_index.tid, data, true), + .int_negative => ip.indexToKeyBigInt(unwrapped_index.tid, data, false), .int_small => { - const info = ip.extraData(IntSmall, data); + const info = extraData(unwrapped_index.getExtra(ip), IntSmall, data); return .{ .int = .{ .ty = info.ty, .storage = .{ .u64 = info.value }, } }; }, .int_lazy_align, .int_lazy_size => |tag| { - const info = ip.extraData(IntLazy, data); + const info = extraData(unwrapped_index.getExtra(ip), IntLazy, data); return .{ .int = .{ .ty = info.ty, .storage = switch (tag) { @@ -5363,30 +5539,30 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { } }, .float_f64 => .{ .float = .{ .ty = .f64_type, - .storage = .{ .f64 = ip.extraData(Float64, data).get() }, + .storage = .{ .f64 = extraData(unwrapped_index.getExtra(ip), Float64, data).get() }, } }, .float_f80 => .{ .float = .{ .ty = .f80_type, - .storage = .{ .f80 = ip.extraData(Float80, data).get() }, + .storage = .{ .f80 = extraData(unwrapped_index.getExtra(ip), Float80, data).get() }, } }, .float_f128 => .{ .float = .{ .ty = .f128_type, - .storage = .{ .f128 = ip.extraData(Float128, data).get() }, + .storage = .{ .f128 = extraData(unwrapped_index.getExtra(ip), Float128, data).get() }, } }, .float_c_longdouble_f80 => .{ .float = .{ .ty = .c_longdouble_type, - .storage = .{ .f80 = ip.extraData(Float80, data).get() }, + .storage = .{ .f80 = extraData(unwrapped_index.getExtra(ip), Float80, data).get() }, } }, .float_c_longdouble_f128 => .{ .float = .{ .ty = .c_longdouble_type, - .storage = .{ .f128 = ip.extraData(Float128, data).get() }, + .storage = .{ .f128 = extraData(unwrapped_index.getExtra(ip), Float128, data).get() }, } }, .float_comptime_float => .{ .float = .{ .ty = .comptime_float_type, - .storage = .{ .f128 = ip.extraData(Float128, data).get() }, + .storage = .{ .f128 = extraData(unwrapped_index.getExtra(ip), Float128, data).get() }, } }, .variable => { - const extra = ip.extraData(Tag.Variable, data); + const extra = extraData(unwrapped_index.getExtra(ip), Tag.Variable, data); return .{ .variable = .{ .ty = extra.ty, .init = extra.init, @@ 
-5398,18 +5574,20 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { .is_weak_linkage = extra.flags.is_weak_linkage, } }; }, - .extern_func => .{ .extern_func = ip.extraData(Tag.ExternFunc, data) }, - .func_instance => .{ .func = ip.extraFuncInstance(data) }, - .func_decl => .{ .func = ip.extraFuncDecl(data) }, - .func_coerced => .{ .func = ip.extraFuncCoerced(data) }, + .extern_func => .{ .extern_func = extraData(unwrapped_index.getExtra(ip), Tag.ExternFunc, data) }, + .func_instance => .{ .func = ip.extraFuncInstance(unwrapped_index.tid, unwrapped_index.getExtra(ip), data) }, + .func_decl => .{ .func = extraFuncDecl(unwrapped_index.tid, unwrapped_index.getExtra(ip), data) }, + .func_coerced => .{ .func = ip.extraFuncCoerced(unwrapped_index.getExtra(ip), data) }, .only_possible_value => { const ty: Index = @enumFromInt(data); - const ty_item = ty.getItem(ip); + const ty_unwrapped = ty.unwrap(ip); + const ty_extra = ty_unwrapped.getExtra(ip); + const ty_item = ty_unwrapped.getItem(ip); return switch (ty_item.tag) { .type_array_big => { const sentinel = @as( *const [1]Index, - @ptrCast(&ip.extra.items[ty_item.data + std.meta.fieldIndex(Array, "sentinel").?]), + @ptrCast(&ty_extra.view().items(.@"0")[ty_item.data + std.meta.fieldIndex(Array, "sentinel").?]), ); return .{ .aggregate = .{ .ty = ty, @@ -5437,9 +5615,9 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { // There is only one possible value precisely due to the // fact that this values slice is fully populated! .type_struct_anon, .type_tuple_anon => { - const type_struct_anon = ip.extraDataTrail(TypeStructAnon, ty_item.data); + const type_struct_anon = extraDataTrail(ty_extra, TypeStructAnon, ty_item.data); const fields_len = type_struct_anon.data.fields_len; - const values = ip.extra.items[type_struct_anon.end + fields_len ..][0..fields_len]; + const values = ty_extra.view().items(.@"0")[type_struct_anon.end + fields_len ..][0..fields_len]; return .{ .aggregate = .{ .ty = ty, .storage = .{ .elems = @ptrCast(values) }, @@ -5455,62 +5633,65 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { }; }, .bytes => { - const extra = ip.extraData(Bytes, data); + const extra = extraData(unwrapped_index.getExtra(ip), Bytes, data); return .{ .aggregate = .{ .ty = extra.ty, .storage = .{ .bytes = extra.bytes }, } }; }, .aggregate => { - const extra = ip.extraDataTrail(Tag.Aggregate, data); + const extra_list = unwrapped_index.getExtra(ip); + const extra = extraDataTrail(extra_list, Tag.Aggregate, data); const len: u32 = @intCast(ip.aggregateTypeLenIncludingSentinel(extra.data.ty)); - const fields: []const Index = @ptrCast(ip.extra.items[extra.end..][0..len]); + const fields: []const Index = @ptrCast(extra_list.view().items(.@"0")[extra.end..][0..len]); return .{ .aggregate = .{ .ty = extra.data.ty, .storage = .{ .elems = fields }, } }; }, .repeated => { - const extra = ip.extraData(Repeated, data); + const extra = extraData(unwrapped_index.getExtra(ip), Repeated, data); return .{ .aggregate = .{ .ty = extra.ty, .storage = .{ .repeated_elem = extra.elem_val }, } }; }, - .union_value => .{ .un = ip.extraData(Key.Union, data) }, - .error_set_error => .{ .err = ip.extraData(Key.Error, data) }, + .union_value => .{ .un = extraData(unwrapped_index.getExtra(ip), Key.Union, data) }, + .error_set_error => .{ .err = extraData(unwrapped_index.getExtra(ip), Key.Error, data) }, .error_union_error => { - const extra = ip.extraData(Key.Error, data); + const extra = extraData(unwrapped_index.getExtra(ip), Key.Error, data); 
return .{ .error_union = .{ .ty = extra.ty, .val = .{ .err_name = extra.name }, } }; }, .error_union_payload => { - const extra = ip.extraData(Tag.TypeValue, data); + const extra = extraData(unwrapped_index.getExtra(ip), Tag.TypeValue, data); return .{ .error_union = .{ .ty = extra.ty, .val = .{ .payload = extra.val }, } }; }, .enum_literal => .{ .enum_literal = @enumFromInt(data) }, - .enum_tag => .{ .enum_tag = ip.extraData(Tag.EnumTag, data) }, + .enum_tag => .{ .enum_tag = extraData(unwrapped_index.getExtra(ip), Tag.EnumTag, data) }, .memoized_call => { - const extra = ip.extraDataTrail(MemoizedCall, data); + const extra_list = unwrapped_index.getExtra(ip); + const extra = extraDataTrail(extra_list, MemoizedCall, data); return .{ .memoized_call = .{ .func = extra.data.func, - .arg_values = @ptrCast(ip.extra.items[extra.end..][0..extra.data.args_len]), + .arg_values = @ptrCast(extra_list.view().items(.@"0")[extra.end..][0..extra.data.args_len]), .result = extra.data.result, } }; }, }; } -fn extraErrorSet(ip: *const InternPool, extra_index: u32) Key.ErrorSetType { - const error_set = ip.extraDataTrail(Tag.ErrorSet, extra_index); +fn extraErrorSet(tid: Zcu.PerThread.Id, extra: Local.Extra, extra_index: u32) Key.ErrorSetType { + const error_set = extraDataTrail(extra, Tag.ErrorSet, extra_index); return .{ .names = .{ + .tid = tid, .start = @intCast(error_set.end), .len = error_set.data.names_len, }, @@ -5518,60 +5699,67 @@ fn extraErrorSet(ip: *const InternPool, extra_index: u32) Key.ErrorSetType { }; } -fn extraTypeStructAnon(ip: *const InternPool, extra_index: u32) Key.AnonStructType { - const type_struct_anon = ip.extraDataTrail(TypeStructAnon, extra_index); +fn extraTypeStructAnon(tid: Zcu.PerThread.Id, extra: Local.Extra, extra_index: u32) Key.AnonStructType { + const type_struct_anon = extraDataTrail(extra, TypeStructAnon, extra_index); const fields_len = type_struct_anon.data.fields_len; return .{ .types = .{ + .tid = tid, .start = type_struct_anon.end, .len = fields_len, }, .values = .{ + .tid = tid, .start = type_struct_anon.end + fields_len, .len = fields_len, }, .names = .{ + .tid = tid, .start = type_struct_anon.end + fields_len + fields_len, .len = fields_len, }, }; } -fn extraTypeTupleAnon(ip: *const InternPool, extra_index: u32) Key.AnonStructType { - const type_struct_anon = ip.extraDataTrail(TypeStructAnon, extra_index); +fn extraTypeTupleAnon(tid: Zcu.PerThread.Id, extra: Local.Extra, extra_index: u32) Key.AnonStructType { + const type_struct_anon = extraDataTrail(extra, TypeStructAnon, extra_index); const fields_len = type_struct_anon.data.fields_len; return .{ .types = .{ + .tid = tid, .start = type_struct_anon.end, .len = fields_len, }, .values = .{ + .tid = tid, .start = type_struct_anon.end + fields_len, .len = fields_len, }, .names = .{ + .tid = tid, .start = 0, .len = 0, }, }; } -fn extraFuncType(ip: *const InternPool, extra_index: u32) Key.FuncType { - const type_function = ip.extraDataTrail(Tag.TypeFunction, extra_index); - var index: usize = type_function.end; +fn extraFuncType(tid: Zcu.PerThread.Id, extra: Local.Extra, extra_index: u32) Key.FuncType { + const type_function = extraDataTrail(extra, Tag.TypeFunction, extra_index); + var trail_index: usize = type_function.end; const comptime_bits: u32 = if (!type_function.data.flags.has_comptime_bits) 0 else b: { - const x = ip.extra.items[index]; - index += 1; + const x = extra.view().items(.@"0")[trail_index]; + trail_index += 1; break :b x; }; const noalias_bits: u32 = if 
(!type_function.data.flags.has_noalias_bits) 0 else b: { - const x = ip.extra.items[index]; - index += 1; + const x = extra.view().items(.@"0")[trail_index]; + trail_index += 1; break :b x; }; return .{ .param_types = .{ - .start = @intCast(index), + .tid = tid, + .start = @intCast(trail_index), .len = type_function.data.params_len, }, .return_type = type_function.data.return_type, @@ -5587,10 +5775,11 @@ fn extraFuncType(ip: *const InternPool, extra_index: u32) Key.FuncType { }; } -fn extraFuncDecl(ip: *const InternPool, extra_index: u32) Key.Func { +fn extraFuncDecl(tid: Zcu.PerThread.Id, extra: Local.Extra, extra_index: u32) Key.Func { const P = Tag.FuncDecl; - const func_decl = ip.extraDataTrail(P, extra_index); + const func_decl = extraDataTrail(extra, P, extra_index); return .{ + .tid = tid, .ty = func_decl.data.ty, .uncoerced_ty = func_decl.data.ty, .analysis_extra_index = extra_index + std.meta.fieldIndex(P, "analysis").?, @@ -5604,15 +5793,16 @@ fn extraFuncDecl(ip: *const InternPool, extra_index: u32) Key.Func { .lbrace_column = func_decl.data.lbrace_column, .rbrace_column = func_decl.data.rbrace_column, .generic_owner = .none, - .comptime_args = .{ .start = 0, .len = 0 }, + .comptime_args = Index.Slice.empty, }; } -fn extraFuncInstance(ip: *const InternPool, extra_index: u32) Key.Func { +fn extraFuncInstance(ip: *const InternPool, tid: Zcu.PerThread.Id, extra: Local.Extra, extra_index: u32) Key.Func { const P = Tag.FuncInstance; - const fi = ip.extraDataTrail(P, extra_index); + const fi = extraDataTrail(extra, P, extra_index); const func_decl = ip.funcDeclInfo(fi.data.generic_owner); return .{ + .tid = tid, .ty = fi.data.ty, .uncoerced_ty = fi.data.ty, .analysis_extra_index = extra_index + std.meta.fieldIndex(P, "analysis").?, @@ -5627,30 +5817,34 @@ fn extraFuncInstance(ip: *const InternPool, extra_index: u32) Key.Func { .rbrace_column = func_decl.rbrace_column, .generic_owner = fi.data.generic_owner, .comptime_args = .{ + .tid = tid, .start = fi.end + @intFromBool(fi.data.analysis.inferred_error_set), .len = ip.funcTypeParamsLen(func_decl.ty), }, }; } -fn extraFuncCoerced(ip: *const InternPool, extra_index: u32) Key.Func { - const func_coerced = ip.extraData(Tag.FuncCoerced, extra_index); - const sub_item = func_coerced.func.getItem(ip); +fn extraFuncCoerced(ip: *const InternPool, extra: Local.Extra, extra_index: u32) Key.Func { + const func_coerced = extraData(extra, Tag.FuncCoerced, extra_index); + const func_unwrapped = func_coerced.func.unwrap(ip); + const sub_item = func_unwrapped.getItem(ip); + const func_extra = func_unwrapped.getExtra(ip); var func: Key.Func = switch (sub_item.tag) { - .func_instance => ip.extraFuncInstance(sub_item.data), - .func_decl => ip.extraFuncDecl(sub_item.data), + .func_instance => ip.extraFuncInstance(func_unwrapped.tid, func_extra, sub_item.data), + .func_decl => extraFuncDecl(func_unwrapped.tid, func_extra, sub_item.data), else => unreachable, }; func.ty = func_coerced.ty; return func; } -fn indexToKeyBigInt(ip: *const InternPool, limb_index: u32, positive: bool) Key { - const int_info = ip.limbData(Int, limb_index); +fn indexToKeyBigInt(ip: *const InternPool, tid: Zcu.PerThread.Id, limb_index: u32, positive: bool) Key { + const limbs_items = ip.getLocalShared(tid).getLimbs().view().items(.@"0"); + const int: Int = @bitCast(limbs_items[limb_index..][0..Int.limbs_items_len].*); return .{ .int = .{ - .ty = int_info.ty, + .ty = int.ty, .storage = .{ .big_int = .{ - .limbs = ip.limbSlice(Int, limb_index, int_info.limbs_len), + .limbs = 
limbs_items[limb_index + Int.limbs_items_len ..][0..int.limbs_len], .positive = positive, } }, } }; @@ -5791,7 +5985,9 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All var gop = try ip.getOrPutKey(gpa, tid, key); defer gop.deinit(); if (gop == .existing) return gop.existing; - const items = ip.getLocal(tid).getMutableItems(gpa); + const local = ip.getLocal(tid); + const items = local.getMutableItems(gpa); + const extra = local.getMutableExtra(gpa); try items.ensureUnusedCapacity(1); switch (key) { .int_type => |int_type| { @@ -5827,7 +6023,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All items.appendAssumeCapacity(.{ .tag = .type_pointer, - .data = try ip.addExtra(gpa, ptr_type_adjusted), + .data = try addExtra(extra, ptr_type_adjusted), }); }, .array_type => |array_type| { @@ -5838,7 +6034,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All if (array_type.sentinel == .none) { items.appendAssumeCapacity(.{ .tag = .type_array_small, - .data = try ip.addExtra(gpa, Vector{ + .data = try addExtra(extra, Vector{ .len = len, .child = array_type.child, }), @@ -5850,7 +6046,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All const length = Array.Length.init(array_type.len); items.appendAssumeCapacity(.{ .tag = .type_array_big, - .data = try ip.addExtra(gpa, Array{ + .data = try addExtra(extra, Array{ .len0 = length.a, .len1 = length.b, .child = array_type.child, @@ -5861,7 +6057,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All .vector_type => |vector_type| { items.appendAssumeCapacity(.{ .tag = .type_vector, - .data = try ip.addExtra(gpa, Vector{ + .data = try addExtra(extra, Vector{ .len = vector_type.len, .child = vector_type.child, }), @@ -5887,7 +6083,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All .data = @intFromEnum(error_union_type.payload_type), } else .{ .tag = .type_error_union, - .data = try ip.addExtra(gpa, error_union_type), + .data = try addExtra(extra, error_union_type), }); }, .error_set_type => |error_set_type| { @@ -5897,15 +6093,15 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All const names_map = try ip.addMap(gpa, names.len); addStringsToMap(ip, names_map, names); const names_len = error_set_type.names.len; - try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.ErrorSet).Struct.fields.len + names_len); + try extra.ensureUnusedCapacity(@typeInfo(Tag.ErrorSet).Struct.fields.len + names_len); items.appendAssumeCapacity(.{ .tag = .type_error_set, - .data = ip.addExtraAssumeCapacity(Tag.ErrorSet{ + .data = addExtraAssumeCapacity(extra, Tag.ErrorSet{ .names_len = names_len, .names_map = names_map, }), }); - ip.extra.appendSliceAssumeCapacity(@ptrCast(error_set_type.names.get(ip))); + extra.appendSliceAssumeCapacity(.{@ptrCast(error_set_type.names.get(ip))}); }, .inferred_error_set_type => |ies_index| { items.appendAssumeCapacity(.{ @@ -5914,14 +6110,14 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All }); }, .simple_type => |simple_type| { - assert(@intFromEnum(simple_type) == items.lenPtr().*); + assert(@intFromEnum(simple_type) == items.mutate.len); items.appendAssumeCapacity(.{ .tag = .simple_type, .data = 0, // avoid writing `undefined` bits to a file }); }, .simple_value => |simple_value| { - assert(@intFromEnum(simple_value) == items.lenPtr().*); + assert(@intFromEnum(simple_value) == 
items.mutate.len); items.appendAssumeCapacity(.{ .tag = .simple_value, .data = 0, // avoid writing `undefined` bits to a file @@ -5950,7 +6146,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All if (has_init) assert(variable.ty == ip.typeOf(variable.init)); items.appendAssumeCapacity(.{ .tag = .variable, - .data = try ip.addExtra(gpa, Tag.Variable{ + .data = try addExtra(extra, Tag.Variable{ .ty = variable.ty, .init = variable.init, .decl = variable.decl, @@ -5970,7 +6166,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All assert(ip.indexToKey(ip.typeOf(slice.ptr)).ptr_type.flags.size == .Many); items.appendAssumeCapacity(.{ .tag = .ptr_slice, - .data = try ip.addExtra(gpa, PtrSlice{ + .data = try addExtra(extra, PtrSlice{ .ty = slice.ty, .ptr = slice.ptr, .len = slice.len, @@ -5984,11 +6180,11 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All items.appendAssumeCapacity(switch (ptr.base_addr) { .decl => |decl| .{ .tag = .ptr_decl, - .data = try ip.addExtra(gpa, PtrDecl.init(ptr.ty, decl, ptr.byte_offset)), + .data = try addExtra(extra, PtrDecl.init(ptr.ty, decl, ptr.byte_offset)), }, .comptime_alloc => |alloc_index| .{ .tag = .ptr_comptime_alloc, - .data = try ip.addExtra(gpa, PtrComptimeAlloc.init(ptr.ty, alloc_index, ptr.byte_offset)), + .data = try addExtra(extra, PtrComptimeAlloc.init(ptr.ty, alloc_index, ptr.byte_offset)), }, .anon_decl => |anon_decl| if (ptrsHaveSameAlignment(ip, ptr.ty, ptr_type, anon_decl.orig_ty)) item: { if (ptr.ty != anon_decl.orig_ty) { @@ -5999,17 +6195,17 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All } break :item .{ .tag = .ptr_anon_decl, - .data = try ip.addExtra(gpa, PtrAnonDecl.init(ptr.ty, anon_decl.val, ptr.byte_offset)), + .data = try addExtra(extra, PtrAnonDecl.init(ptr.ty, anon_decl.val, ptr.byte_offset)), }; } else .{ .tag = .ptr_anon_decl_aligned, - .data = try ip.addExtra(gpa, PtrAnonDeclAligned.init(ptr.ty, anon_decl.val, anon_decl.orig_ty, ptr.byte_offset)), + .data = try addExtra(extra, PtrAnonDeclAligned.init(ptr.ty, anon_decl.val, anon_decl.orig_ty, ptr.byte_offset)), }, .comptime_field => |field_val| item: { assert(field_val != .none); break :item .{ .tag = .ptr_comptime_field, - .data = try ip.addExtra(gpa, PtrComptimeField.init(ptr.ty, field_val, ptr.byte_offset)), + .data = try addExtra(extra, PtrComptimeField.init(ptr.ty, field_val, ptr.byte_offset)), }; }, .eu_payload, .opt_payload => |base| item: { @@ -6028,12 +6224,12 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All .opt_payload => .ptr_opt_payload, else => unreachable, }, - .data = try ip.addExtra(gpa, PtrBase.init(ptr.ty, base, ptr.byte_offset)), + .data = try addExtra(extra, PtrBase.init(ptr.ty, base, ptr.byte_offset)), }; }, .int => .{ .tag = .ptr_int, - .data = try ip.addExtra(gpa, PtrInt.init(ptr.ty, ptr.byte_offset)), + .data = try addExtra(extra, PtrInt.init(ptr.ty, ptr.byte_offset)), }, .arr_elem, .field => |base_index| { const base_ptr_type = ip.indexToKey(ip.typeOf(base_index.base)).ptr_type; @@ -6077,7 +6273,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All .field => .ptr_field, else => unreachable, }, - .data = try ip.addExtra(gpa, PtrBaseIndex.init(ptr.ty, base_index.base, index_index, ptr.byte_offset)), + .data = try addExtra(extra, PtrBaseIndex.init(ptr.ty, base_index.base, index_index, ptr.byte_offset)), }); return gop.put(); }, @@ -6092,7 +6288,7 @@ pub fn 
get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All .data = @intFromEnum(opt.ty), } else .{ .tag = .opt_payload, - .data = try ip.addExtra(gpa, Tag.TypeValue{ + .data = try addExtra(extra, Tag.TypeValue{ .ty = opt.ty, .val = opt.val, }), @@ -6110,7 +6306,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All .lazy_align => .int_lazy_align, .lazy_size => .int_lazy_size, }, - .data = try ip.addExtra(gpa, IntLazy{ + .data = try addExtra(extra, IntLazy{ .ty = int.ty, .lazy_ty = lazy_ty, }), @@ -6251,7 +6447,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All if (big_int.to(u32)) |casted| { items.appendAssumeCapacity(.{ .tag = .int_small, - .data = try ip.addExtra(gpa, IntSmall{ + .data = try addExtra(extra, IntSmall{ .ty = int.ty, .value = casted, }), @@ -6266,7 +6462,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All if (std.math.cast(u32, x)) |casted| { items.appendAssumeCapacity(.{ .tag = .int_small, - .data = try ip.addExtra(gpa, IntSmall{ + .data = try addExtra(extra, IntSmall{ .ty = int.ty, .value = casted, }), @@ -6287,7 +6483,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All assert(ip.isErrorSetType(err.ty)); items.appendAssumeCapacity(.{ .tag = .error_set_error, - .data = try ip.addExtra(gpa, err), + .data = try addExtra(extra, err), }); }, @@ -6296,14 +6492,14 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All items.appendAssumeCapacity(switch (error_union.val) { .err_name => |err_name| .{ .tag = .error_union_error, - .data = try ip.addExtra(gpa, Key.Error{ + .data = try addExtra(extra, Key.Error{ .ty = error_union.ty, .name = err_name, }), }, .payload => |payload| .{ .tag = .error_union_payload, - .data = try ip.addExtra(gpa, Tag.TypeValue{ + .data = try addExtra(extra, Tag.TypeValue{ .ty = error_union.ty, .val = payload, }), @@ -6325,7 +6521,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All } items.appendAssumeCapacity(.{ .tag = .enum_tag, - .data = try ip.addExtra(gpa, enum_tag), + .data = try addExtra(extra, enum_tag), }); }, @@ -6346,29 +6542,29 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All }), .f64_type => items.appendAssumeCapacity(.{ .tag = .float_f64, - .data = try ip.addExtra(gpa, Float64.pack(float.storage.f64)), + .data = try addExtra(extra, Float64.pack(float.storage.f64)), }), .f80_type => items.appendAssumeCapacity(.{ .tag = .float_f80, - .data = try ip.addExtra(gpa, Float80.pack(float.storage.f80)), + .data = try addExtra(extra, Float80.pack(float.storage.f80)), }), .f128_type => items.appendAssumeCapacity(.{ .tag = .float_f128, - .data = try ip.addExtra(gpa, Float128.pack(float.storage.f128)), + .data = try addExtra(extra, Float128.pack(float.storage.f128)), }), .c_longdouble_type => switch (float.storage) { .f80 => |x| items.appendAssumeCapacity(.{ .tag = .float_c_longdouble_f80, - .data = try ip.addExtra(gpa, Float80.pack(x)), + .data = try addExtra(extra, Float80.pack(x)), }), inline .f16, .f32, .f64, .f128 => |x| items.appendAssumeCapacity(.{ .tag = .float_c_longdouble_f128, - .data = try ip.addExtra(gpa, Float128.pack(x)), + .data = try addExtra(extra, Float128.pack(x)), }), }, .comptime_float_type => items.appendAssumeCapacity(.{ .tag = .float_comptime_float, - .data = try ip.addExtra(gpa, Float128.pack(float.storage.f128)), + .data = try addExtra(extra, Float128.pack(float.storage.f128)), }), else => 
unreachable, } @@ -6490,13 +6686,10 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All .repeated_elem => |elem| elem, }; - try ip.extra.ensureUnusedCapacity( - gpa, - @typeInfo(Repeated).Struct.fields.len, - ); + try extra.ensureUnusedCapacity(@typeInfo(Repeated).Struct.fields.len); items.appendAssumeCapacity(.{ .tag = .repeated, - .data = ip.addExtraAssumeCapacity(Repeated{ + .data = addExtraAssumeCapacity(extra, Repeated{ .ty = aggregate.ty, .elem_val = elem, }), @@ -6506,9 +6699,9 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All if (child == .u8_type) bytes: { const strings = ip.getLocal(tid).getMutableStrings(gpa); - const start = strings.lenPtr().*; + const start = strings.mutate.len; try strings.ensureUnusedCapacity(@intCast(len_including_sentinel + 1)); - try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Bytes).Struct.fields.len); + try extra.ensureUnusedCapacity(@typeInfo(Bytes).Struct.fields.len); switch (aggregate.storage) { .bytes => |bytes| strings.appendSliceAssumeCapacity(.{bytes.toSlice(len, ip)}), .elems => |elems| for (elems[0..@intCast(len)]) |elem| switch (ip.indexToKey(elem)) { @@ -6539,7 +6732,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All ); items.appendAssumeCapacity(.{ .tag = .bytes, - .data = ip.addExtraAssumeCapacity(Bytes{ + .data = addExtraAssumeCapacity(extra, Bytes{ .ty = aggregate.ty, .bytes = string, }), @@ -6547,18 +6740,17 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All return gop.put(); } - try ip.extra.ensureUnusedCapacity( - gpa, + try extra.ensureUnusedCapacity( @typeInfo(Tag.Aggregate).Struct.fields.len + @as(usize, @intCast(len_including_sentinel + 1)), ); items.appendAssumeCapacity(.{ .tag = .aggregate, - .data = ip.addExtraAssumeCapacity(Tag.Aggregate{ + .data = addExtraAssumeCapacity(extra, Tag.Aggregate{ .ty = aggregate.ty, }), }); - ip.extra.appendSliceAssumeCapacity(@ptrCast(aggregate.storage.elems)); - if (sentinel != .none) ip.extra.appendAssumeCapacity(@intFromEnum(sentinel)); + extra.appendSliceAssumeCapacity(.{@ptrCast(aggregate.storage.elems)}); + if (sentinel != .none) extra.appendAssumeCapacity(.{@intFromEnum(sentinel)}); }, .un => |un| { @@ -6566,23 +6758,23 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All assert(un.val != .none); items.appendAssumeCapacity(.{ .tag = .union_value, - .data = try ip.addExtra(gpa, un), + .data = try addExtra(extra, un), }); }, .memoized_call => |memoized_call| { for (memoized_call.arg_values) |arg| assert(arg != .none); - try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(MemoizedCall).Struct.fields.len + + try extra.ensureUnusedCapacity(@typeInfo(MemoizedCall).Struct.fields.len + memoized_call.arg_values.len); items.appendAssumeCapacity(.{ .tag = .memoized_call, - .data = ip.addExtraAssumeCapacity(MemoizedCall{ + .data = addExtraAssumeCapacity(extra, MemoizedCall{ .func = memoized_call.func, .args_len = @intCast(memoized_call.arg_values.len), .result = memoized_call.result, }), }); - ip.extra.appendSliceAssumeCapacity(@ptrCast(memoized_call.arg_values)); + extra.appendSliceAssumeCapacity(.{@ptrCast(memoized_call.arg_values)}); }, } return gop.put(); @@ -6639,11 +6831,14 @@ pub fn getUnionType( defer gop.deinit(); if (gop == .existing) return .{ .existing = gop.existing }; - const items = ip.getLocal(tid).getMutableItems(gpa); + const local = ip.getLocal(tid); + const items = local.getMutableItems(gpa); + try 
items.ensureUnusedCapacity(1); + const extra = local.getMutableExtra(gpa); const align_elements_len = if (ini.flags.any_aligned_fields) (ini.fields_len + 3) / 4 else 0; const align_element: u32 = @bitCast([1]u8{@intFromEnum(Alignment.none)} ** 4); - try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.TypeUnion).Struct.fields.len + + try extra.ensureUnusedCapacity(@typeInfo(Tag.TypeUnion).Struct.fields.len + // TODO: fmt bug // zig fmt: off switch (ini.key) { @@ -6653,9 +6848,8 @@ pub fn getUnionType( // zig fmt: on ini.fields_len + // field types align_elements_len); - try items.ensureUnusedCapacity(1); - const extra_index = ip.addExtraAssumeCapacity(Tag.TypeUnion{ + const extra_index = addExtraAssumeCapacity(extra, Tag.TypeUnion{ .flags = .{ .any_captures = ini.key == .declared and ini.key.declared.captures.len != 0, .runtime_tag = ini.flags.runtime_tag, @@ -6686,27 +6880,28 @@ pub fn getUnionType( switch (ini.key) { .declared => |d| if (d.captures.len != 0) { - ip.extra.appendAssumeCapacity(@intCast(d.captures.len)); - ip.extra.appendSliceAssumeCapacity(@ptrCast(d.captures)); + extra.appendAssumeCapacity(.{@intCast(d.captures.len)}); + extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures)}); }, - .reified => |r| _ = ip.addExtraAssumeCapacity(PackedU64.init(r.type_hash)), + .reified => |r| _ = addExtraAssumeCapacity(extra, PackedU64.init(r.type_hash)), } // field types if (ini.field_types.len > 0) { assert(ini.field_types.len == ini.fields_len); - ip.extra.appendSliceAssumeCapacity(@ptrCast(ini.field_types)); + extra.appendSliceAssumeCapacity(.{@ptrCast(ini.field_types)}); } else { - ip.extra.appendNTimesAssumeCapacity(@intFromEnum(Index.none), ini.fields_len); + extra.appendNTimesAssumeCapacity(.{@intFromEnum(Index.none)}, ini.fields_len); } // field alignments if (ini.flags.any_aligned_fields) { - ip.extra.appendNTimesAssumeCapacity(align_element, align_elements_len); + extra.appendNTimesAssumeCapacity(.{align_element}, align_elements_len); if (ini.field_aligns.len > 0) { assert(ini.field_aligns.len == ini.fields_len); @memcpy((Alignment.Slice{ - .start = @intCast(ip.extra.items.len - align_elements_len), + .tid = tid, + .start = @intCast(extra.mutate.len - align_elements_len), .len = @intCast(ini.field_aligns.len), }).get(ip), ini.field_aligns); } @@ -6715,6 +6910,7 @@ pub fn getUnionType( } return .{ .wip = .{ + .tid = tid, .index = gop.put(), .decl_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeUnion, "decl").?, .namespace_extra_index = if (ini.has_namespace) @@ -6725,13 +6921,15 @@ pub fn getUnionType( } pub const WipNamespaceType = struct { + tid: Zcu.PerThread.Id, index: Index, decl_extra_index: u32, namespace_extra_index: ?u32, pub fn finish(wip: WipNamespaceType, ip: *InternPool, decl: DeclIndex, namespace: OptionalNamespaceIndex) Index { - ip.extra.items[wip.decl_extra_index] = @intFromEnum(decl); + const extra_items = ip.getLocalShared(wip.tid).extra.acquire().view().items(.@"0"); + extra_items[wip.decl_extra_index] = @intFromEnum(decl); if (wip.namespace_extra_index) |i| { - ip.extra.items[i] = @intFromEnum(namespace.unwrap().?); + extra_items[i] = @intFromEnum(namespace.unwrap().?); } else { assert(namespace == .none); } @@ -6789,7 +6987,9 @@ pub fn getStructType( defer gop.deinit(); if (gop == .existing) return .{ .existing = gop.existing }; - const items = ip.getLocal(tid).getMutableItems(gpa); + const local = ip.getLocal(tid); + const items = local.getMutableItems(gpa); + const extra = local.getMutableExtra(gpa); const names_map = try ip.addMap(gpa, 
ini.fields_len); errdefer _ = ip.maps.pop(); @@ -6802,7 +7002,7 @@ pub fn getStructType( .auto => false, .@"extern" => true, .@"packed" => { - try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.TypeStructPacked).Struct.fields.len + + try extra.ensureUnusedCapacity(@typeInfo(Tag.TypeStructPacked).Struct.fields.len + // TODO: fmt bug // zig fmt: off switch (ini.key) { @@ -6813,7 +7013,7 @@ pub fn getStructType( ini.fields_len + // types ini.fields_len + // names ini.fields_len); // inits - const extra_index = ip.addExtraAssumeCapacity(Tag.TypeStructPacked{ + const extra_index = addExtraAssumeCapacity(extra, Tag.TypeStructPacked{ .decl = undefined, // set by `finish` .zir_index = zir_index, .fields_len = ini.fields_len, @@ -6833,19 +7033,20 @@ pub fn getStructType( }); switch (ini.key) { .declared => |d| if (d.captures.len != 0) { - ip.extra.appendAssumeCapacity(@intCast(d.captures.len)); - ip.extra.appendSliceAssumeCapacity(@ptrCast(d.captures)); + extra.appendAssumeCapacity(.{@intCast(d.captures.len)}); + extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures)}); }, .reified => |r| { - _ = ip.addExtraAssumeCapacity(PackedU64.init(r.type_hash)); + _ = addExtraAssumeCapacity(extra, PackedU64.init(r.type_hash)); }, } - ip.extra.appendNTimesAssumeCapacity(@intFromEnum(Index.none), ini.fields_len); - ip.extra.appendNTimesAssumeCapacity(@intFromEnum(OptionalNullTerminatedString.none), ini.fields_len); + extra.appendNTimesAssumeCapacity(.{@intFromEnum(Index.none)}, ini.fields_len); + extra.appendNTimesAssumeCapacity(.{@intFromEnum(OptionalNullTerminatedString.none)}, ini.fields_len); if (ini.any_default_inits) { - ip.extra.appendNTimesAssumeCapacity(@intFromEnum(Index.none), ini.fields_len); + extra.appendNTimesAssumeCapacity(.{@intFromEnum(Index.none)}, ini.fields_len); } return .{ .wip = .{ + .tid = tid, .index = gop.put(), .decl_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeStructPacked, "decl").?, .namespace_extra_index = if (ini.has_namespace) @@ -6860,7 +7061,7 @@ pub fn getStructType( const align_element: u32 = @bitCast([1]u8{@intFromEnum(Alignment.none)} ** 4); const comptime_elements_len = if (ini.any_comptime_fields) (ini.fields_len + 31) / 32 else 0; - try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.TypeStruct).Struct.fields.len + + try extra.ensureUnusedCapacity(@typeInfo(Tag.TypeStruct).Struct.fields.len + // TODO: fmt bug // zig fmt: off switch (ini.key) { @@ -6871,7 +7072,7 @@ pub fn getStructType( (ini.fields_len * 5) + // types, names, inits, runtime order, offsets align_elements_len + comptime_elements_len + 2); // names_map + namespace - const extra_index = ip.addExtraAssumeCapacity(Tag.TypeStruct{ + const extra_index = addExtraAssumeCapacity(extra, Tag.TypeStruct{ .decl = undefined, // set by `finish` .zir_index = zir_index, .fields_len = ini.fields_len, @@ -6905,36 +7106,37 @@ pub fn getStructType( }); switch (ini.key) { .declared => |d| if (d.captures.len != 0) { - ip.extra.appendAssumeCapacity(@intCast(d.captures.len)); - ip.extra.appendSliceAssumeCapacity(@ptrCast(d.captures)); + extra.appendAssumeCapacity(.{@intCast(d.captures.len)}); + extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures)}); }, .reified => |r| { - _ = ip.addExtraAssumeCapacity(PackedU64.init(r.type_hash)); + _ = addExtraAssumeCapacity(extra, PackedU64.init(r.type_hash)); }, } - ip.extra.appendNTimesAssumeCapacity(@intFromEnum(Index.none), ini.fields_len); + extra.appendNTimesAssumeCapacity(.{@intFromEnum(Index.none)}, ini.fields_len); if (!ini.is_tuple) { - 
ip.extra.appendAssumeCapacity(@intFromEnum(names_map)); - ip.extra.appendNTimesAssumeCapacity(@intFromEnum(OptionalNullTerminatedString.none), ini.fields_len); + extra.appendAssumeCapacity(.{@intFromEnum(names_map)}); + extra.appendNTimesAssumeCapacity(.{@intFromEnum(OptionalNullTerminatedString.none)}, ini.fields_len); } if (ini.any_default_inits) { - ip.extra.appendNTimesAssumeCapacity(@intFromEnum(Index.none), ini.fields_len); + extra.appendNTimesAssumeCapacity(.{@intFromEnum(Index.none)}, ini.fields_len); } const namespace_extra_index: ?u32 = if (ini.has_namespace) i: { - ip.extra.appendAssumeCapacity(undefined); // set by `finish` - break :i @intCast(ip.extra.items.len - 1); + extra.appendAssumeCapacity(undefined); // set by `finish` + break :i @intCast(extra.mutate.len - 1); } else null; if (ini.any_aligned_fields) { - ip.extra.appendNTimesAssumeCapacity(align_element, align_elements_len); + extra.appendNTimesAssumeCapacity(.{align_element}, align_elements_len); } if (ini.any_comptime_fields) { - ip.extra.appendNTimesAssumeCapacity(0, comptime_elements_len); + extra.appendNTimesAssumeCapacity(.{0}, comptime_elements_len); } if (ini.layout == .auto) { - ip.extra.appendNTimesAssumeCapacity(@intFromEnum(LoadedStructType.RuntimeOrder.unresolved), ini.fields_len); + extra.appendNTimesAssumeCapacity(.{@intFromEnum(LoadedStructType.RuntimeOrder.unresolved)}, ini.fields_len); } - ip.extra.appendNTimesAssumeCapacity(std.math.maxInt(u32), ini.fields_len); + extra.appendNTimesAssumeCapacity(.{std.math.maxInt(u32)}, ini.fields_len); return .{ .wip = .{ + .tid = tid, .index = gop.put(), .decl_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeStruct, "decl").?, .namespace_extra_index = namespace_extra_index, @@ -6958,34 +7160,35 @@ pub fn getAnonStructType( assert(ini.types.len == ini.values.len); for (ini.types) |elem| assert(elem != .none); - const items = ip.getLocal(tid).getMutableItems(gpa); + const local = ip.getLocal(tid); + const items = local.getMutableItems(gpa); + const extra = local.getMutableExtra(gpa); - const prev_extra_len = ip.extra.items.len; + const prev_extra_len = extra.mutate.len; const fields_len: u32 = @intCast(ini.types.len); - try ip.extra.ensureUnusedCapacity( - gpa, + try items.ensureUnusedCapacity(1); + try extra.ensureUnusedCapacity( @typeInfo(TypeStructAnon).Struct.fields.len + (fields_len * 3), ); - try items.ensureUnusedCapacity(1); - const extra_index = ip.addExtraAssumeCapacity(TypeStructAnon{ + const extra_index = addExtraAssumeCapacity(extra, TypeStructAnon{ .fields_len = fields_len, }); - ip.extra.appendSliceAssumeCapacity(@ptrCast(ini.types)); - ip.extra.appendSliceAssumeCapacity(@ptrCast(ini.values)); - errdefer ip.extra.items.len = prev_extra_len; + extra.appendSliceAssumeCapacity(.{@ptrCast(ini.types)}); + extra.appendSliceAssumeCapacity(.{@ptrCast(ini.values)}); + errdefer extra.mutate.len = prev_extra_len; var gop = try ip.getOrPutKey(gpa, tid, .{ - .anon_struct_type = if (ini.names.len == 0) extraTypeTupleAnon(ip, extra_index) else k: { + .anon_struct_type = if (ini.names.len == 0) extraTypeTupleAnon(tid, extra.list.*, extra_index) else k: { assert(ini.names.len == ini.types.len); - ip.extra.appendSliceAssumeCapacity(@ptrCast(ini.names)); - break :k extraTypeStructAnon(ip, extra_index); + extra.appendSliceAssumeCapacity(.{@ptrCast(ini.names)}); + break :k extraTypeStructAnon(tid, extra.list.*, extra_index); }, }); defer gop.deinit(); if (gop == .existing) { - ip.extra.items.len = prev_extra_len; + extra.mutate.len = prev_extra_len; return 
gop.existing; } @@ -7021,21 +7224,23 @@ pub fn getFuncType( assert(key.return_type != .none); for (key.param_types) |param_type| assert(param_type != .none); + const local = ip.getLocal(tid); + const items = local.getMutableItems(gpa); + try items.ensureUnusedCapacity(1); + const extra = local.getMutableExtra(gpa); + // The strategy here is to add the function type unconditionally, then to // ask if it already exists, and if so, revert the lengths of the mutated // arrays. This is similar to what `getOrPutTrailingString` does. - const prev_extra_len = ip.extra.items.len; + const prev_extra_len = extra.mutate.len; const params_len: u32 = @intCast(key.param_types.len); - try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.TypeFunction).Struct.fields.len + + try extra.ensureUnusedCapacity(@typeInfo(Tag.TypeFunction).Struct.fields.len + @intFromBool(key.comptime_bits != 0) + @intFromBool(key.noalias_bits != 0) + params_len); - const items = ip.getLocal(tid).getMutableItems(gpa); - try items.ensureUnusedCapacity(1); - - const func_type_extra_index = ip.addExtraAssumeCapacity(Tag.TypeFunction{ + const func_type_extra_index = addExtraAssumeCapacity(extra, Tag.TypeFunction{ .params_len = params_len, .return_type = key.return_type, .flags = .{ @@ -7051,17 +7256,17 @@ pub fn getFuncType( }, }); - if (key.comptime_bits != 0) ip.extra.appendAssumeCapacity(key.comptime_bits); - if (key.noalias_bits != 0) ip.extra.appendAssumeCapacity(key.noalias_bits); - ip.extra.appendSliceAssumeCapacity(@ptrCast(key.param_types)); - errdefer ip.extra.items.len = prev_extra_len; + if (key.comptime_bits != 0) extra.appendAssumeCapacity(.{key.comptime_bits}); + if (key.noalias_bits != 0) extra.appendAssumeCapacity(.{key.noalias_bits}); + extra.appendSliceAssumeCapacity(.{@ptrCast(key.param_types)}); + errdefer extra.mutate.len = prev_extra_len; var gop = try ip.getOrPutKey(gpa, tid, .{ - .func_type = extraFuncType(ip, func_type_extra_index), + .func_type = extraFuncType(tid, extra.list.*, func_type_extra_index), }); defer gop.deinit(); if (gop == .existing) { - ip.extra.items.len = prev_extra_len; + extra.mutate.len = prev_extra_len; return gop.existing; } @@ -7081,15 +7286,20 @@ pub fn getExternFunc( var gop = try ip.getOrPutKey(gpa, tid, .{ .extern_func = key }); defer gop.deinit(); if (gop == .existing) return gop.existing; - const prev_extra_len = ip.extra.items.len; - const extra_index = try ip.addExtra(gpa, @as(Tag.ExternFunc, key)); - errdefer ip.extra.items.len = prev_extra_len; - const items = ip.getLocal(tid).getMutableItems(gpa); - try items.append(.{ + + const local = ip.getLocal(tid); + const items = local.getMutableItems(gpa); + try items.ensureUnusedCapacity(1); + const extra = local.getMutableExtra(gpa); + + const prev_extra_len = extra.mutate.len; + const extra_index = try addExtra(extra, @as(Tag.ExternFunc, key)); + errdefer extra.mutate.len = prev_extra_len; + items.appendAssumeCapacity(.{ .tag = .extern_func, .data = extra_index, }); - errdefer items.lenPtr().* -= 1; + errdefer items.mutate.len -= 1; return gop.put(); } @@ -7111,17 +7321,19 @@ pub fn getFuncDecl( tid: Zcu.PerThread.Id, key: GetFuncDeclKey, ) Allocator.Error!Index { + const local = ip.getLocal(tid); + const items = local.getMutableItems(gpa); + try items.ensureUnusedCapacity(1); + const extra = local.getMutableExtra(gpa); + // The strategy here is to add the function type unconditionally, then to // ask if it already exists, and if so, revert the lengths of the mutated // arrays. This is similar to what `getOrPutTrailingString` does. 
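The comment above names the idiom behind most of the get* changes in this patch, so a reduced, self-contained sketch may help; `Pool` and its fields are hypothetical stand-ins for illustration, not InternPool's real API, and deduplication is by content hash purely for brevity:

const std = @import("std");

/// Hypothetical pool showing the "add unconditionally, then revert if the
/// key already existed" shape and nothing else.
const Pool = struct {
    extra: std.ArrayListUnmanaged(u32) = .{},
    map: std.AutoHashMapUnmanaged(u64, u32) = .{},

    fn getOrPut(pool: *Pool, gpa: std.mem.Allocator, payload: []const u32) !u32 {
        const prev_extra_len = pool.extra.items.len;
        // Add the trailing data unconditionally...
        try pool.extra.appendSlice(gpa, payload);
        // ...if a later step fails, unwind the speculative append...
        errdefer pool.extra.items.len = prev_extra_len;
        // ...then ask whether an equal entry already existed.
        const hash = std.hash.Wyhash.hash(0, std.mem.sliceAsBytes(payload));
        const gop = try pool.map.getOrPut(gpa, hash);
        if (gop.found_existing) {
            // Existing entry: revert the length of the mutated array.
            pool.extra.items.len = prev_extra_len;
            return gop.value_ptr.*;
        }
        gop.value_ptr.* = @intCast(prev_extra_len);
        return gop.value_ptr.*;
    }
};

test "second insertion reverts the speculative append" {
    const gpa = std.testing.allocator;
    var pool: Pool = .{};
    defer pool.extra.deinit(gpa);
    defer pool.map.deinit(gpa);

    const a = try pool.getOrPut(gpa, &.{ 1, 2, 3 });
    const b = try pool.getOrPut(gpa, &.{ 1, 2, 3 });
    try std.testing.expectEqual(a, b);
    try std.testing.expectEqual(@as(usize, 3), pool.extra.items.len);
}

The real functions thread a `tid` through `getOrPutKey` and rewind `extra.mutate.len` on a per-thread list rather than a plain array list, but the control flow is the same.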
- const prev_extra_len = ip.extra.items.len; - - try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.FuncDecl).Struct.fields.len); + const prev_extra_len = extra.mutate.len; - const items = ip.getLocal(tid).getMutableItems(gpa); - try items.ensureUnusedCapacity(1); + try extra.ensureUnusedCapacity(@typeInfo(Tag.FuncDecl).Struct.fields.len); - const func_decl_extra_index = ip.addExtraAssumeCapacity(Tag.FuncDecl{ + const func_decl_extra_index = addExtraAssumeCapacity(extra, Tag.FuncDecl{ .analysis = .{ .state = if (key.cc == .Inline) .inline_only else .none, .is_cold = false, @@ -7138,14 +7350,14 @@ pub fn getFuncDecl( .lbrace_column = key.lbrace_column, .rbrace_column = key.rbrace_column, }); - errdefer ip.extra.items.len = prev_extra_len; + errdefer extra.mutate.len = prev_extra_len; var gop = try ip.getOrPutKey(gpa, tid, .{ - .func = extraFuncDecl(ip, func_decl_extra_index), + .func = extraFuncDecl(tid, extra.list.*, func_decl_extra_index), }); defer gop.deinit(); if (gop == .existing) { - ip.extra.items.len = prev_extra_len; + extra.mutate.len = prev_extra_len; return gop.existing; } @@ -7188,13 +7400,18 @@ pub fn getFuncDeclIes( assert(key.bare_return_type != .none); for (key.param_types) |param_type| assert(param_type != .none); + const local = ip.getLocal(tid); + const items = local.getMutableItems(gpa); + try items.ensureUnusedCapacity(4); + const extra = local.getMutableExtra(gpa); + // The strategy here is to add the function decl unconditionally, then to // ask if it already exists, and if so, revert the lengths of the mutated // arrays. This is similar to what `getOrPutTrailingString` does. - const prev_extra_len = ip.extra.items.len; + const prev_extra_len = extra.mutate.len; const params_len: u32 = @intCast(key.param_types.len); - try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.FuncDecl).Struct.fields.len + + try extra.ensureUnusedCapacity(@typeInfo(Tag.FuncDecl).Struct.fields.len + 1 + // inferred_error_set @typeInfo(Tag.ErrorUnionType).Struct.fields.len + @typeInfo(Tag.TypeFunction).Struct.fields.len + @@ -7202,27 +7419,24 @@ pub fn getFuncDeclIes( @intFromBool(key.noalias_bits != 0) + params_len); - const items = ip.getLocal(tid).getMutableItems(gpa); - try items.ensureUnusedCapacity(4); - const func_index = Index.Unwrapped.wrap(.{ .tid = tid, - .index = items.lenPtr().* + 0, + .index = items.mutate.len + 0, }, ip); const error_union_type = Index.Unwrapped.wrap(.{ .tid = tid, - .index = items.lenPtr().* + 1, + .index = items.mutate.len + 1, }, ip); const error_set_type = Index.Unwrapped.wrap(.{ .tid = tid, - .index = items.lenPtr().* + 2, + .index = items.mutate.len + 2, }, ip); const func_ty = Index.Unwrapped.wrap(.{ .tid = tid, - .index = items.lenPtr().* + 3, + .index = items.mutate.len + 3, }, ip); - const func_decl_extra_index = ip.addExtraAssumeCapacity(Tag.FuncDecl{ + const func_decl_extra_index = addExtraAssumeCapacity(extra, Tag.FuncDecl{ .analysis = .{ .state = if (key.cc == .Inline) .inline_only else .none, .is_cold = false, @@ -7239,9 +7453,9 @@ pub fn getFuncDeclIes( .lbrace_column = key.lbrace_column, .rbrace_column = key.rbrace_column, }); - ip.extra.appendAssumeCapacity(@intFromEnum(Index.none)); + extra.appendAssumeCapacity(.{@intFromEnum(Index.none)}); - const func_type_extra_index = ip.addExtraAssumeCapacity(Tag.TypeFunction{ + const func_type_extra_index = addExtraAssumeCapacity(extra, Tag.TypeFunction{ .params_len = params_len, .return_type = error_union_type, .flags = .{ @@ -7256,9 +7470,9 @@ pub fn getFuncDeclIes( .addrspace_is_generic = 
key.addrspace_is_generic, }, }); - if (key.comptime_bits != 0) ip.extra.appendAssumeCapacity(key.comptime_bits); - if (key.noalias_bits != 0) ip.extra.appendAssumeCapacity(key.noalias_bits); - ip.extra.appendSliceAssumeCapacity(@ptrCast(key.param_types)); + if (key.comptime_bits != 0) extra.appendAssumeCapacity(.{key.comptime_bits}); + if (key.noalias_bits != 0) extra.appendAssumeCapacity(.{key.noalias_bits}); + extra.appendSliceAssumeCapacity(.{@ptrCast(key.param_types)}); items.appendSliceAssumeCapacity(.{ .tag = &.{ @@ -7269,7 +7483,7 @@ pub fn getFuncDeclIes( }, .data = &.{ func_decl_extra_index, - ip.addExtraAssumeCapacity(Tag.ErrorUnionType{ + addExtraAssumeCapacity(extra, Tag.ErrorUnionType{ .error_set_type = error_set_type, .payload_type = key.bare_return_type, }), @@ -7278,18 +7492,18 @@ pub fn getFuncDeclIes( }, }); errdefer { - items.lenPtr().* -= 4; - ip.extra.items.len = prev_extra_len; + items.mutate.len -= 4; + extra.mutate.len = prev_extra_len; } var func_gop = try ip.getOrPutKey(gpa, tid, .{ - .func = extraFuncDecl(ip, func_decl_extra_index), + .func = extraFuncDecl(tid, extra.list.*, func_decl_extra_index), }); defer func_gop.deinit(); if (func_gop == .existing) { // An existing function type was found; undo the additions to our two arrays. - items.lenPtr().* -= 4; - ip.extra.items.len = prev_extra_len; + items.mutate.len -= 4; + extra.mutate.len = prev_extra_len; return func_gop.existing; } var error_union_type_gop = try ip.getOrPutKey(gpa, tid, .{ .error_union_type = .{ @@ -7302,7 +7516,7 @@ pub fn getFuncDeclIes( }); defer error_set_type_gop.deinit(); var func_ty_gop = try ip.getOrPutKey(gpa, tid, .{ - .func_type = extraFuncType(ip, func_type_extra_index), + .func_type = extraFuncType(tid, extra.list.*, func_type_extra_index), }); defer func_ty_gop.deinit(); assert(func_gop.putAt(3) == func_index); @@ -7320,38 +7534,40 @@ pub fn getErrorSetType( ) Allocator.Error!Index { assert(std.sort.isSorted(NullTerminatedString, names, {}, NullTerminatedString.indexLessThan)); + const local = ip.getLocal(tid); + const items = local.getMutableItems(gpa); + const extra = local.getMutableExtra(gpa); + try extra.ensureUnusedCapacity(@typeInfo(Tag.ErrorSet).Struct.fields.len + names.len); + // The strategy here is to add the type unconditionally, then to ask if it // already exists, and if so, revert the lengths of the mutated arrays. // This is similar to what `getOrPutTrailingString` does. 
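One detail in the hunk below deserves a note: `predicted_names_map` is computed from `ip.maps.items.len` before the map exists, so the extra data can reference it up front, and the later `addMap` call asserts the prediction held. A minimal sketch of that predict-then-assert idiom, using a plain list in place of `ip.maps`:

const std = @import("std");

test "predict an index before the append happens" {
    const gpa = std.testing.allocator;
    var maps = std.ArrayListUnmanaged(u8){};
    defer maps.deinit(gpa);

    // Record where the next element will land...
    const predicted = maps.items.len;
    // ...so earlier-written data may reference it, then append for real...
    try maps.append(gpa, 0xaa);
    // ...and verify the prediction, as the assert in the real code does.
    try std.testing.expectEqual(predicted, maps.items.len - 1);
}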
- try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.ErrorSet).Struct.fields.len + names.len); - - const prev_extra_len = ip.extra.items.len; - errdefer ip.extra.items.len = prev_extra_len; + const prev_extra_len = extra.mutate.len; + errdefer extra.mutate.len = prev_extra_len; const predicted_names_map: MapIndex = @enumFromInt(ip.maps.items.len); - const error_set_extra_index = ip.addExtraAssumeCapacity(Tag.ErrorSet{ + const error_set_extra_index = addExtraAssumeCapacity(extra, Tag.ErrorSet{ .names_len = @intCast(names.len), .names_map = predicted_names_map, }); - ip.extra.appendSliceAssumeCapacity(@ptrCast(names)); - errdefer ip.extra.items.len = prev_extra_len; + extra.appendSliceAssumeCapacity(.{@ptrCast(names)}); + errdefer extra.mutate.len = prev_extra_len; var gop = try ip.getOrPutKey(gpa, tid, .{ - .error_set_type = extraErrorSet(ip, error_set_extra_index), + .error_set_type = extraErrorSet(tid, extra.list.*, error_set_extra_index), }); defer gop.deinit(); if (gop == .existing) { - ip.extra.items.len = prev_extra_len; + extra.mutate.len = prev_extra_len; return gop.existing; } - const items = ip.getLocal(tid).getMutableItems(gpa); try items.append(.{ .tag = .type_error_set, .data = error_set_extra_index, }); - errdefer items.lenPtr().* -= 1; + errdefer items.mutate.len -= 1; const names_map = try ip.addMap(gpa, names.len); assert(names_map == predicted_names_map); @@ -7396,16 +7612,20 @@ pub fn getFuncInstance( .is_noinline = arg.is_noinline, }); + const local = ip.getLocal(tid); + const items = local.getMutableItems(gpa); + const extra = local.getMutableExtra(gpa); + try extra.ensureUnusedCapacity(@typeInfo(Tag.FuncInstance).Struct.fields.len + + arg.comptime_args.len); + const generic_owner = unwrapCoercedFunc(ip, arg.generic_owner); assert(arg.comptime_args.len == ip.funcTypeParamsLen(ip.typeOf(generic_owner))); - try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.FuncInstance).Struct.fields.len + - arg.comptime_args.len); - const prev_extra_len = ip.extra.items.len; - errdefer ip.extra.items.len = prev_extra_len; + const prev_extra_len = extra.mutate.len; + errdefer extra.mutate.len = prev_extra_len; - const func_extra_index = ip.addExtraAssumeCapacity(Tag.FuncInstance{ + const func_extra_index = addExtraAssumeCapacity(extra, Tag.FuncInstance{ .analysis = .{ .state = if (arg.cc == .Inline) .inline_only else .none, .is_cold = false, @@ -7421,28 +7641,28 @@ pub fn getFuncInstance( .branch_quota = 0, .generic_owner = generic_owner, }); - ip.extra.appendSliceAssumeCapacity(@ptrCast(arg.comptime_args)); + extra.appendSliceAssumeCapacity(.{@ptrCast(arg.comptime_args)}); var gop = try ip.getOrPutKey(gpa, tid, .{ - .func = extraFuncInstance(ip, func_extra_index), + .func = ip.extraFuncInstance(tid, extra.list.*, func_extra_index), }); defer gop.deinit(); if (gop == .existing) { - ip.extra.items.len = prev_extra_len; + extra.mutate.len = prev_extra_len; return gop.existing; } - const items = ip.getLocal(tid).getMutableItems(gpa); - const func_index = Index.Unwrapped.wrap(.{ .tid = tid, .index = items.lenPtr().* }, ip); + const func_index = Index.Unwrapped.wrap(.{ .tid = tid, .index = items.mutate.len }, ip); try items.append(.{ .tag = .func_instance, .data = func_extra_index, }); - errdefer items.lenPtr().* -= 1; + errdefer items.mutate.len -= 1; try finishFuncInstance( ip, gpa, tid, + extra, generic_owner, func_index, func_extra_index, @@ -7466,15 +7686,20 @@ pub fn getFuncInstanceIes( assert(arg.bare_return_type != .none); for (arg.param_types) |param_type| assert(param_type != 
.none); + const local = ip.getLocal(tid); + const items = local.getMutableItems(gpa); + const extra = local.getMutableExtra(gpa); + try items.ensureUnusedCapacity(4); + const generic_owner = unwrapCoercedFunc(ip, arg.generic_owner); // The strategy here is to add the function decl unconditionally, then to // ask if it already exists, and if so, revert the lengths of the mutated // arrays. This is similar to what `getOrPutTrailingString` does. - const prev_extra_len = ip.extra.items.len; + const prev_extra_len = extra.mutate.len; const params_len: u32 = @intCast(arg.param_types.len); - try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.FuncInstance).Struct.fields.len + + try extra.ensureUnusedCapacity(@typeInfo(Tag.FuncInstance).Struct.fields.len + 1 + // inferred_error_set arg.comptime_args.len + @typeInfo(Tag.ErrorUnionType).Struct.fields.len + @@ -7482,27 +7707,24 @@ pub fn getFuncInstanceIes( @intFromBool(arg.noalias_bits != 0) + params_len); - const items = ip.getLocal(tid).getMutableItems(gpa); - try items.ensureUnusedCapacity(4); - const func_index = Index.Unwrapped.wrap(.{ .tid = tid, - .index = items.lenPtr().* + 0, + .index = items.mutate.len + 0, }, ip); const error_union_type = Index.Unwrapped.wrap(.{ .tid = tid, - .index = items.lenPtr().* + 1, + .index = items.mutate.len + 1, }, ip); const error_set_type = Index.Unwrapped.wrap(.{ .tid = tid, - .index = items.lenPtr().* + 2, + .index = items.mutate.len + 2, }, ip); const func_ty = Index.Unwrapped.wrap(.{ .tid = tid, - .index = items.lenPtr().* + 3, + .index = items.mutate.len + 3, }, ip); - const func_extra_index = ip.addExtraAssumeCapacity(Tag.FuncInstance{ + const func_extra_index = addExtraAssumeCapacity(extra, Tag.FuncInstance{ .analysis = .{ .state = if (arg.cc == .Inline) .inline_only else .none, .is_cold = false, @@ -7518,10 +7740,10 @@ pub fn getFuncInstanceIes( .branch_quota = 0, .generic_owner = generic_owner, }); - ip.extra.appendAssumeCapacity(@intFromEnum(Index.none)); // resolved error set - ip.extra.appendSliceAssumeCapacity(@ptrCast(arg.comptime_args)); + extra.appendAssumeCapacity(.{@intFromEnum(Index.none)}); // resolved error set + extra.appendSliceAssumeCapacity(.{@ptrCast(arg.comptime_args)}); - const func_type_extra_index = ip.addExtraAssumeCapacity(Tag.TypeFunction{ + const func_type_extra_index = addExtraAssumeCapacity(extra, Tag.TypeFunction{ .params_len = params_len, .return_type = error_union_type, .flags = .{ @@ -7537,8 +7759,8 @@ pub fn getFuncInstanceIes( }, }); // no comptime_bits because has_comptime_bits is false - if (arg.noalias_bits != 0) ip.extra.appendAssumeCapacity(arg.noalias_bits); - ip.extra.appendSliceAssumeCapacity(@ptrCast(arg.param_types)); + if (arg.noalias_bits != 0) extra.appendAssumeCapacity(.{arg.noalias_bits}); + extra.appendSliceAssumeCapacity(.{@ptrCast(arg.param_types)}); items.appendSliceAssumeCapacity(.{ .tag = &.{ @@ -7549,7 +7771,7 @@ pub fn getFuncInstanceIes( }, .data = &.{ func_extra_index, - ip.addExtraAssumeCapacity(Tag.ErrorUnionType{ + addExtraAssumeCapacity(extra, Tag.ErrorUnionType{ .error_set_type = error_set_type, .payload_type = arg.bare_return_type, }), @@ -7558,18 +7780,18 @@ pub fn getFuncInstanceIes( }, }); errdefer { - items.lenPtr().* -= 4; - ip.extra.items.len = prev_extra_len; + items.mutate.len -= 4; + extra.mutate.len = prev_extra_len; } var func_gop = try ip.getOrPutKey(gpa, tid, .{ - .func = extraFuncInstance(ip, func_extra_index), + .func = ip.extraFuncInstance(tid, extra.list.*, func_extra_index), }); defer func_gop.deinit(); if (func_gop == 
.existing) { // Hot path: undo the additions to our two arrays. - items.lenPtr().* -= 4; - ip.extra.items.len = prev_extra_len; + items.mutate.len -= 4; + extra.mutate.len = prev_extra_len; return func_gop.existing; } var error_union_type_gop = try ip.getOrPutKey(gpa, tid, .{ .error_union_type = .{ @@ -7582,13 +7804,14 @@ pub fn getFuncInstanceIes( }); defer error_set_type_gop.deinit(); var func_ty_gop = try ip.getOrPutKey(gpa, tid, .{ - .func_type = extraFuncType(ip, func_type_extra_index), + .func_type = extraFuncType(tid, extra.list.*, func_type_extra_index), }); defer func_ty_gop.deinit(); try finishFuncInstance( ip, gpa, tid, + extra, generic_owner, func_index, func_extra_index, @@ -7606,6 +7829,7 @@ fn finishFuncInstance( ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, + extra: Local.Extra.Mutable, generic_owner: Index, func_index: Index, func_extra_index: u32, @@ -7631,7 +7855,7 @@ fn finishFuncInstance( errdefer ip.destroyDecl(gpa, decl_index); // Populate the owner_decl field which was left undefined until now. - ip.extra.items[ + extra.view().items(.@"0")[ func_extra_index + std.meta.fieldIndex(Tag.FuncInstance, "owner_decl").? ] = @intFromEnum(decl_index); @@ -7660,6 +7884,7 @@ pub const EnumTypeInit = struct { }; pub const WipEnumType = struct { + tid: Zcu.PerThread.Id, index: Index, tag_ty_index: u32, decl_index: u32, @@ -7675,9 +7900,11 @@ pub const WipEnumType = struct { decl: DeclIndex, namespace: OptionalNamespaceIndex, ) void { - ip.extra.items[wip.decl_index] = @intFromEnum(decl); + const extra = ip.getLocalShared(wip.tid).extra.acquire(); + const extra_items = extra.view().items(.@"0"); + extra_items[wip.decl_index] = @intFromEnum(decl); if (wip.namespace_index) |i| { - ip.extra.items[i] = @intFromEnum(namespace.unwrap().?); + extra_items[i] = @intFromEnum(namespace.unwrap().?); } else { assert(namespace == .none); } @@ -7685,7 +7912,8 @@ pub const WipEnumType = struct { pub fn setTagTy(wip: WipEnumType, ip: *InternPool, tag_ty: Index) void { assert(ip.isIntegerType(tag_ty)); - ip.extra.items[wip.tag_ty_index] = @intFromEnum(tag_ty); + const extra = ip.getLocalShared(wip.tid).extra.acquire(); + extra.view().items(.@"0")[wip.tag_ty_index] = @intFromEnum(tag_ty); } pub const FieldConflict = struct { @@ -7697,23 +7925,26 @@ pub const WipEnumType = struct { /// If the enum is automatically numbered, `value` must be `.none`. /// Otherwise, the type of `value` must be the integer tag type of the enum. 
pub fn nextField(wip: WipEnumType, ip: *InternPool, name: NullTerminatedString, value: Index) ?FieldConflict { - if (ip.addFieldName(wip.names_map, wip.names_start, name)) |conflict| { + const unwrapped_index = wip.index.unwrap(ip); + const extra_list = ip.getLocalShared(unwrapped_index.tid).extra.acquire(); + const extra_items = extra_list.view().items(.@"0"); + if (ip.addFieldName(extra_list, wip.names_map, wip.names_start, name)) |conflict| { return .{ .kind = .name, .prev_field_idx = conflict }; } if (value == .none) { assert(wip.values_map == .none); return null; } - assert(ip.typeOf(value) == @as(Index, @enumFromInt(ip.extra.items[wip.tag_ty_index]))); + assert(ip.typeOf(value) == @as(Index, @enumFromInt(extra_items[wip.tag_ty_index]))); const map = &ip.maps.items[@intFromEnum(wip.values_map.unwrap().?)]; const field_index = map.count(); - const indexes = ip.extra.items[wip.values_start..][0..field_index]; + const indexes = extra_items[wip.values_start..][0..field_index]; const adapter: Index.Adapter = .{ .indexes = @ptrCast(indexes) }; const gop = map.getOrPutAssumeCapacityAdapted(value, adapter); if (gop.found_existing) { return .{ .kind = .value, .prev_field_idx = @intCast(gop.index) }; } - ip.extra.items[wip.values_start + field_index] = @intFromEnum(value); + extra_items[wip.values_start + field_index] = @intFromEnum(value); return null; } @@ -7746,8 +7977,10 @@ pub fn getEnumType( defer gop.deinit(); if (gop == .existing) return .{ .existing = gop.existing }; - const items = ip.getLocal(tid).getMutableItems(gpa); + const local = ip.getLocal(tid); + const items = local.getMutableItems(gpa); try items.ensureUnusedCapacity(1); + const extra = local.getMutableExtra(gpa); const names_map = try ip.addMap(gpa, ini.fields_len); errdefer _ = ip.maps.pop(); @@ -7755,7 +7988,7 @@ pub fn getEnumType( switch (ini.tag_mode) { .auto => { assert(!ini.has_values); - try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(EnumAuto).Struct.fields.len + + try extra.ensureUnusedCapacity(@typeInfo(EnumAuto).Struct.fields.len + // TODO: fmt bug // zig fmt: off switch (ini.key) { @@ -7765,7 +7998,7 @@ pub fn getEnumType( // zig fmt: on ini.fields_len); // field types - const extra_index = ip.addExtraAssumeCapacity(EnumAuto{ + const extra_index = addExtraAssumeCapacity(extra, EnumAuto{ .decl = undefined, // set by `prepare` .captures_len = switch (ini.key) { .declared => |d| @intCast(d.captures.len), @@ -7784,12 +8017,13 @@ pub fn getEnumType( .data = extra_index, }); switch (ini.key) { - .declared => |d| ip.extra.appendSliceAssumeCapacity(@ptrCast(d.captures)), - .reified => |r| _ = ip.addExtraAssumeCapacity(PackedU64.init(r.type_hash)), + .declared => |d| extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures)}), + .reified => |r| _ = addExtraAssumeCapacity(extra, PackedU64.init(r.type_hash)), } - const names_start = ip.extra.items.len; - ip.extra.appendNTimesAssumeCapacity(undefined, ini.fields_len); + const names_start = extra.mutate.len; + _ = extra.addManyAsSliceAssumeCapacity(ini.fields_len); return .{ .wip = .{ + .tid = tid, .index = gop.put(), .tag_ty_index = extra_index + std.meta.fieldIndex(EnumAuto, "int_tag_type").?, .decl_index = extra_index + std.meta.fieldIndex(EnumAuto, "decl").?, @@ -7809,7 +8043,7 @@ pub fn getEnumType( _ = ip.maps.pop(); }; - try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(EnumExplicit).Struct.fields.len + + try extra.ensureUnusedCapacity(@typeInfo(EnumExplicit).Struct.fields.len + // TODO: fmt bug // zig fmt: off switch (ini.key) { @@ -7820,7 +8054,7 @@ pub fn 
getEnumType( ini.fields_len + // field types ini.fields_len * @intFromBool(ini.has_values)); // field values - const extra_index = ip.addExtraAssumeCapacity(EnumExplicit{ + const extra_index = addExtraAssumeCapacity(extra, EnumExplicit{ .decl = undefined, // set by `prepare` .captures_len = switch (ini.key) { .declared => |d| @intCast(d.captures.len), @@ -7844,16 +8078,17 @@ pub fn getEnumType( .data = extra_index, }); switch (ini.key) { - .declared => |d| ip.extra.appendSliceAssumeCapacity(@ptrCast(d.captures)), - .reified => |r| _ = ip.addExtraAssumeCapacity(PackedU64.init(r.type_hash)), + .declared => |d| extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures)}), + .reified => |r| _ = addExtraAssumeCapacity(extra, PackedU64.init(r.type_hash)), } - const names_start = ip.extra.items.len; - ip.extra.appendNTimesAssumeCapacity(undefined, ini.fields_len); - const values_start = ip.extra.items.len; + const names_start = extra.mutate.len; + _ = extra.addManyAsSliceAssumeCapacity(ini.fields_len); + const values_start = extra.mutate.len; if (ini.has_values) { - ip.extra.appendNTimesAssumeCapacity(undefined, ini.fields_len); + _ = extra.addManyAsSliceAssumeCapacity(ini.fields_len); } return .{ .wip = .{ + .tid = tid, .index = gop.put(), .tag_ty_index = extra_index + std.meta.fieldIndex(EnumAuto, "int_tag_type").?, .decl_index = extra_index + std.meta.fieldIndex(EnumAuto, "decl").?, @@ -7889,8 +8124,10 @@ pub fn getGeneratedTagEnumType( assert(ip.isIntegerType(ini.tag_ty)); for (ini.values) |val| assert(ip.typeOf(val) == ini.tag_ty); - const items = ip.getLocal(tid).getMutableItems(gpa); + const local = ip.getLocal(tid); + const items = local.getMutableItems(gpa); try items.ensureUnusedCapacity(1); + const extra = local.getMutableExtra(gpa); const names_map = try ip.addMap(gpa, ini.names.len); errdefer _ = ip.maps.pop(); @@ -7898,15 +8135,15 @@ pub fn getGeneratedTagEnumType( const fields_len: u32 = @intCast(ini.names.len); - const prev_extra_len = ip.extra.items.len; + const prev_extra_len = extra.mutate.len; switch (ini.tag_mode) { .auto => { - try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(EnumAuto).Struct.fields.len + + try extra.ensureUnusedCapacity(@typeInfo(EnumAuto).Struct.fields.len + 1 + // owner_union fields_len); // field names items.appendAssumeCapacity(.{ .tag = .type_enum_auto, - .data = ip.addExtraAssumeCapacity(EnumAuto{ + .data = addExtraAssumeCapacity(extra, EnumAuto{ .decl = ini.decl, .captures_len = 0, .namespace = .none, @@ -7916,11 +8153,11 @@ pub fn getGeneratedTagEnumType( .zir_index = .none, }), }); - ip.extra.appendAssumeCapacity(@intFromEnum(ini.owner_union_ty)); - ip.extra.appendSliceAssumeCapacity(@ptrCast(ini.names)); + extra.appendAssumeCapacity(.{@intFromEnum(ini.owner_union_ty)}); + extra.appendSliceAssumeCapacity(.{@ptrCast(ini.names)}); }, .explicit, .nonexhaustive => { - try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(EnumExplicit).Struct.fields.len + + try extra.ensureUnusedCapacity(@typeInfo(EnumExplicit).Struct.fields.len + 1 + // owner_union fields_len + // field names ini.values.len); // field values @@ -7939,7 +8176,7 @@ pub fn getGeneratedTagEnumType( .nonexhaustive => .type_enum_nonexhaustive, .auto => unreachable, }, - .data = ip.addExtraAssumeCapacity(EnumExplicit{ + .data = addExtraAssumeCapacity(extra, EnumExplicit{ .decl = ini.decl, .captures_len = 0, .namespace = .none, @@ -7950,12 +8187,12 @@ pub fn getGeneratedTagEnumType( .zir_index = .none, }), }); - ip.extra.appendAssumeCapacity(@intFromEnum(ini.owner_union_ty)); - 
ip.extra.appendSliceAssumeCapacity(@ptrCast(ini.names)); - ip.extra.appendSliceAssumeCapacity(@ptrCast(ini.values)); + extra.appendAssumeCapacity(.{@intFromEnum(ini.owner_union_ty)}); + extra.appendSliceAssumeCapacity(.{@ptrCast(ini.names)}); + extra.appendSliceAssumeCapacity(.{@ptrCast(ini.values)}); }, } - errdefer ip.extra.items.len = prev_extra_len; + errdefer extra.mutate.len = prev_extra_len; errdefer switch (ini.tag_mode) { .auto => {}, .explicit, .nonexhaustive => _ = if (ini.values.len != 0) ip.maps.pop(), @@ -8001,14 +8238,16 @@ pub fn getOpaqueType( defer gop.deinit(); if (gop == .existing) return .{ .existing = gop.existing }; - const items = ip.getLocal(tid).getMutableItems(gpa); + const local = ip.getLocal(tid); + const items = local.getMutableItems(gpa); + const extra = local.getMutableExtra(gpa); try items.ensureUnusedCapacity(1); - try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.TypeOpaque).Struct.fields.len + switch (ini.key) { + try extra.ensureUnusedCapacity(@typeInfo(Tag.TypeOpaque).Struct.fields.len + switch (ini.key) { .declared => |d| d.captures.len, .reified => 0, }); - const extra_index = ip.addExtraAssumeCapacity(Tag.TypeOpaque{ + const extra_index = addExtraAssumeCapacity(extra, Tag.TypeOpaque{ .decl = undefined, // set by `finish` .namespace = .none, .zir_index = switch (ini.key) { @@ -8024,10 +8263,11 @@ pub fn getOpaqueType( .data = extra_index, }); switch (ini.key) { - .declared => |d| ip.extra.appendSliceAssumeCapacity(@ptrCast(d.captures)), + .declared => |d| extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures)}), .reified => {}, } return .{ .wip = .{ + .tid = tid, .index = gop.put(), .decl_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeOpaque, "decl").?, .namespace_extra_index = if (ini.has_namespace) @@ -8092,19 +8332,19 @@ fn addMap(ip: *InternPool, gpa: Allocator, cap: usize) Allocator.Error!MapIndex /// Leak the index until the next garbage collection. /// Invalidates all references to this index. pub fn remove(ip: *InternPool, tid: Zcu.PerThread.Id, index: Index) void { - const unwrapped = index.unwrap(ip); + const unwrapped_index = index.unwrap(ip); if (@intFromEnum(index) < static_keys.len) { // The item being removed replaced a special index via `InternPool.resolveBuiltinType`. // Restore the original item at this index. assert(static_keys[@intFromEnum(index)] == .simple_type); - const items = ip.getLocalShared(unwrapped.tid).items.view(); - @atomicStore(Tag, &items.items(.tag)[unwrapped.index], .simple_type, .monotonic); + const items = ip.getLocalShared(unwrapped_index.tid).items.acquire().view(); + @atomicStore(Tag, &items.items(.tag)[unwrapped_index.index], .simple_type, .monotonic); return; } - if (unwrapped.tid == tid) { - const items_len = &ip.getLocal(unwrapped.tid).mutate.items.len; - if (unwrapped.index == items_len.* - 1) { + if (unwrapped_index.tid == tid) { + const items_len = &ip.getLocal(unwrapped_index.tid).mutate.items.len; + if (unwrapped_index.index == items_len.* - 1) { // Happy case - we can just drop the item without affecting any other indices. items_len.* -= 1; return; @@ -8114,8 +8354,8 @@ pub fn remove(ip: *InternPool, tid: Zcu.PerThread.Id, index: Index) void { // We must preserve the item so that indices following it remain valid. // Thus, we will rewrite the tag to `removed`, leaking the item until // next GC but causing `KeyAdapter` to ignore it. 
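For illustration, here is a minimal standalone sketch of the tombstone technique described in the comment above: instead of shifting items down (which would invalidate every later index), a removed entry is overwritten with a sentinel tag and treated as absent by lookups. The `Tag`, `Item`, and `Pool` names below are invented for the example and are not the InternPool API; the real code also uses an atomic store because other threads may read the tag concurrently.

    const std = @import("std");

    const Tag = enum { removed, int, string };

    const Item = struct {
        tag: Tag,
        data: u32,
    };

    const Pool = struct {
        items: std.ArrayListUnmanaged(Item) = .{},

        fn remove(pool: *Pool, index: usize) void {
            if (index == pool.items.items.len - 1) {
                // Happy case: the last item can simply be popped.
                pool.items.items.len -= 1;
            } else {
                // Leave a tombstone so indices after `index` stay valid;
                // lookups treat `.removed` entries as absent.
                pool.items.items[index].tag = .removed;
            }
        }
    };

    test "tombstone removal keeps later indices stable" {
        const gpa = std.testing.allocator;
        var pool: Pool = .{};
        defer pool.items.deinit(gpa);
        try pool.items.append(gpa, .{ .tag = .int, .data = 1 });
        try pool.items.append(gpa, .{ .tag = .string, .data = 2 });
        try pool.items.append(gpa, .{ .tag = .int, .data = 3 });
        pool.remove(0); // not last: tombstoned in place
        pool.remove(2); // last: popped
        try std.testing.expectEqual(Tag.removed, pool.items.items[0].tag);
        try std.testing.expectEqual(@as(usize, 2), pool.items.items.len);
    }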
- const items = ip.getLocalShared(unwrapped.tid).items.view(); - @atomicStore(Tag, &items.items(.tag)[unwrapped.index], .removed, .monotonic); + const items = ip.getLocalShared(unwrapped_index.tid).items.acquire().view(); + @atomicStore(Tag, &items.items(.tag)[unwrapped_index.index], .removed, .monotonic); } fn addInt( @@ -8126,28 +8366,32 @@ fn addInt( tag: Tag, limbs: []const Limb, ) !void { + const local = ip.getLocal(tid); + const items_list = local.getMutableItems(gpa); + const limbs_list = local.getMutableLimbs(gpa); const limbs_len: u32 = @intCast(limbs.len); - try ip.reserveLimbs(gpa, @typeInfo(Int).Struct.fields.len + limbs_len); - ip.getLocal(tid).getMutableItems(gpa).appendAssumeCapacity(.{ + try limbs_list.ensureUnusedCapacity(Int.limbs_items_len + limbs_len); + items_list.appendAssumeCapacity(.{ .tag = tag, - .data = ip.addLimbsExtraAssumeCapacity(Int{ - .ty = ty, - .limbs_len = limbs_len, - }), + .data = limbs_list.mutate.len, }); - ip.addLimbsAssumeCapacity(limbs); + limbs_list.addManyAsArrayAssumeCapacity(Int.limbs_items_len)[0].* = @bitCast(Int{ + .ty = ty, + .limbs_len = limbs_len, + }); + limbs_list.appendSliceAssumeCapacity(.{limbs}); } -fn addExtra(ip: *InternPool, gpa: Allocator, extra: anytype) Allocator.Error!u32 { - const fields = @typeInfo(@TypeOf(extra)).Struct.fields; - try ip.extra.ensureUnusedCapacity(gpa, fields.len); - return ip.addExtraAssumeCapacity(extra); +fn addExtra(extra: Local.Extra.Mutable, item: anytype) Allocator.Error!u32 { + const fields = @typeInfo(@TypeOf(item)).Struct.fields; + try extra.ensureUnusedCapacity(fields.len); + return addExtraAssumeCapacity(extra, item); } -fn addExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 { - const result: u32 = @intCast(ip.extra.items.len); - inline for (@typeInfo(@TypeOf(extra)).Struct.fields) |field| { - ip.extra.appendAssumeCapacity(switch (field.type) { +fn addExtraAssumeCapacity(extra: Local.Extra.Mutable, item: anytype) u32 { + const result: u32 = extra.mutate.len; + inline for (@typeInfo(@TypeOf(item)).Struct.fields) |field| { + extra.appendAssumeCapacity(.{switch (field.type) { Index, DeclIndex, NamespaceIndex, @@ -8162,7 +8406,7 @@ fn addExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 { TrackedInst.Index, TrackedInst.Index.Optional, ComptimeAllocIndex, - => @intFromEnum(@field(extra, field.name)), + => @intFromEnum(@field(item, field.name)), u32, i32, @@ -8174,22 +8418,14 @@ fn addExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 { Tag.TypeStruct.Flags, Tag.TypeStructPacked.Flags, Tag.Variable.Flags, - => @bitCast(@field(extra, field.name)), + => @bitCast(@field(item, field.name)), else => @compileError("bad field type: " ++ @typeName(field.type)), - }); + }}); } return result; } -fn reserveLimbs(ip: *InternPool, gpa: Allocator, n: usize) !void { - switch (@sizeOf(Limb)) { - @sizeOf(u32) => try ip.extra.ensureUnusedCapacity(gpa, n), - @sizeOf(u64) => try ip.limbs.ensureUnusedCapacity(gpa, n), - else => @compileError("unsupported host"), - } -} - fn addLimbsExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 { switch (@sizeOf(Limb)) { @sizeOf(u32) => return addExtraAssumeCapacity(ip, extra), @@ -8212,19 +8448,12 @@ fn addLimbsExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 { return result; } -fn addLimbsAssumeCapacity(ip: *InternPool, limbs: []const Limb) void { - switch (@sizeOf(Limb)) { - @sizeOf(u32) => ip.extra.appendSliceAssumeCapacity(limbs), - @sizeOf(u64) => ip.limbs.appendSliceAssumeCapacity(limbs), - else => @compileError("unsupported host"), - } -} - 
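The rewritten `addExtraAssumeCapacity` above (and `extraDataTrail` below) implement a simple serialization scheme: a struct whose fields are all 32 bits wide is flattened field by field into an untyped `u32` array with `inline for`, using `@intFromEnum`/`@bitCast` to encode and `@enumFromInt`/`@bitCast` to decode. A minimal round-trip sketch under those assumptions, with an invented `Payload` type rather than the real tag payloads:

    const std = @import("std");

    const Index = enum(u32) { none = std.math.maxInt(u32), _ };

    const Payload = struct {
        ty: Index,
        len: u32,
    };

    fn addExtra(extra: *std.ArrayListUnmanaged(u32), gpa: std.mem.Allocator, item: anytype) !u32 {
        const result: u32 = @intCast(extra.items.len);
        inline for (@typeInfo(@TypeOf(item)).Struct.fields) |field| {
            try extra.append(gpa, switch (field.type) {
                Index => @intFromEnum(@field(item, field.name)),
                u32 => @field(item, field.name),
                else => @compileError("bad field type: " ++ @typeName(field.type)),
            });
        }
        return result;
    }

    fn extraData(extra: []const u32, comptime T: type, index: u32) T {
        var result: T = undefined;
        inline for (@typeInfo(T).Struct.fields, index..) |field, i| {
            @field(result, field.name) = switch (field.type) {
                Index => @enumFromInt(extra[i]),
                u32 => extra[i],
                else => @compileError("bad field type: " ++ @typeName(field.type)),
            };
        }
        return result;
    }

    test "extra data round trip" {
        const gpa = std.testing.allocator;
        var extra: std.ArrayListUnmanaged(u32) = .{};
        defer extra.deinit(gpa);
        const index = try addExtra(&extra, gpa, Payload{ .ty = .none, .len = 3 });
        const decoded = extraData(extra.items, Payload, index);
        try std.testing.expectEqual(Index.none, decoded.ty);
        try std.testing.expectEqual(@as(u32, 3), decoded.len);
    }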
-fn extraDataTrail(ip: *const InternPool, comptime T: type, index: usize) struct { data: T, end: u32 } { +fn extraDataTrail(extra: Local.Extra, comptime T: type, index: u32) struct { data: T, end: u32 } { + const extra_items = extra.view().items(.@"0"); var result: T = undefined; const fields = @typeInfo(T).Struct.fields; - inline for (fields, 0..) |field, i| { - const int32 = ip.extra.items[i + index]; + inline for (fields, index..) |field, extra_index| { + const extra_item = extra_items[extra_index]; @field(result, field.name) = switch (field.type) { Index, DeclIndex, @@ -8240,7 +8469,7 @@ fn extraDataTrail(ip: *const InternPool, comptime T: type, index: usize) struct TrackedInst.Index, TrackedInst.Index.Optional, ComptimeAllocIndex, - => @enumFromInt(int32), + => @enumFromInt(extra_item), u32, i32, @@ -8252,7 +8481,7 @@ fn extraDataTrail(ip: *const InternPool, comptime T: type, index: usize) struct Tag.TypeStructPacked.Flags, Tag.Variable.Flags, FuncAnalysis, - => @bitCast(int32), + => @bitCast(extra_item), else => @compileError("bad field type: " ++ @typeName(field.type)), }; @@ -8263,75 +8492,8 @@ fn extraDataTrail(ip: *const InternPool, comptime T: type, index: usize) struct }; } -fn extraData(ip: *const InternPool, comptime T: type, index: usize) T { - return extraDataTrail(ip, T, index).data; -} - -/// Asserts the struct has 32-bit fields and the number of fields is evenly divisible by 2. -fn limbData(ip: *const InternPool, comptime T: type, index: usize) T { - switch (@sizeOf(Limb)) { - @sizeOf(u32) => return extraData(ip, T, index), - @sizeOf(u64) => {}, - else => @compileError("unsupported host"), - } - var result: T = undefined; - inline for (@typeInfo(T).Struct.fields, 0..) |field, i| { - const host_int = ip.limbs.items[index + i / 2]; - const int32 = if (i % 2 == 0) - @as(u32, @truncate(host_int)) - else - @as(u32, @truncate(host_int >> 32)); - - @field(result, field.name) = switch (field.type) { - u32 => int32, - Index => @enumFromInt(int32), - else => @compileError("bad field type: " ++ @typeName(field.type)), - }; - } - return result; -} - -/// This function returns the Limb slice that is trailing data after a payload. -fn limbSlice(ip: *const InternPool, comptime S: type, limb_index: u32, len: u32) []const Limb { - const field_count = @typeInfo(S).Struct.fields.len; - switch (@sizeOf(Limb)) { - @sizeOf(u32) => { - const start = limb_index + field_count; - return ip.extra.items[start..][0..len]; - }, - @sizeOf(u64) => { - const start = limb_index + @divExact(field_count, 2); - return ip.limbs.items[start..][0..len]; - }, - else => @compileError("unsupported host"), - } -} - -const LimbsAsIndexes = struct { - start: u32, - len: u32, -}; - -fn limbsSliceToIndex(ip: *const InternPool, limbs: []const Limb) LimbsAsIndexes { - const host_slice = switch (@sizeOf(Limb)) { - @sizeOf(u32) => ip.extra.items, - @sizeOf(u64) => ip.limbs.items, - else => @compileError("unsupported host"), - }; - // TODO: https://github.com/ziglang/zig/issues/1738 - return .{ - .start = @intCast(@divExact(@intFromPtr(limbs.ptr) - @intFromPtr(host_slice.ptr), @sizeOf(Limb))), - .len = @intCast(limbs.len), - }; -} - -/// This function converts Limb array indexes to a primitive slice type. 
-fn limbsIndexToSlice(ip: *const InternPool, limbs: LimbsAsIndexes) []const Limb { - return switch (@sizeOf(Limb)) { - @sizeOf(u32) => ip.extra.items[limbs.start..][0..limbs.len], - @sizeOf(u64) => ip.limbs.items[limbs.start..][0..limbs.len], - else => @compileError("unsupported host"), - }; +fn extraData(extra: Local.Extra, comptime T: type, index: u32) T { + return extraDataTrail(extra, T, index).data; } test "basic usage" { @@ -8381,7 +8543,7 @@ pub fn slicePtrType(ip: *const InternPool, index: Index) Index { .slice_const_u8_sentinel_0_type => return .manyptr_const_u8_sentinel_0_type, else => {}, } - const item = index.getItem(ip); + const item = index.unwrap(ip).getItem(ip); switch (item.tag) { .type_slice => return @enumFromInt(item.data), else => unreachable, // not a slice type @@ -8390,18 +8552,20 @@ pub fn slicePtrType(ip: *const InternPool, index: Index) Index { /// Given a slice value, returns the value of the ptr field. pub fn slicePtr(ip: *const InternPool, index: Index) Index { - const item = index.getItem(ip); + const unwrapped_index = index.unwrap(ip); + const item = unwrapped_index.getItem(ip); switch (item.tag) { - .ptr_slice => return ip.extraData(PtrSlice, item.data).ptr, + .ptr_slice => return extraData(unwrapped_index.getExtra(ip), PtrSlice, item.data).ptr, else => unreachable, // not a slice value } } /// Given a slice value, returns the value of the len field. pub fn sliceLen(ip: *const InternPool, index: Index) Index { - const item = index.getItem(ip); + const unwrapped_index = index.unwrap(ip); + const item = unwrapped_index.getItem(ip); switch (item.tag) { - .ptr_slice => return ip.extraData(PtrSlice, item.data).len, + .ptr_slice => return extraData(unwrapped_index.getExtra(ip), PtrSlice, item.data).len, else => unreachable, // not a slice value } } @@ -8461,20 +8625,24 @@ pub fn getCoerced( } }), }; }, - else => switch (val.getTag(ip)) { - .func_decl => return getCoercedFuncDecl(ip, gpa, tid, val, new_ty), - .func_instance => return getCoercedFuncInstance(ip, gpa, tid, val, new_ty), - .func_coerced => { - const func: Index = @enumFromInt( - ip.extra.items[val.getData(ip) + std.meta.fieldIndex(Tag.FuncCoerced, "func").?], - ); - switch (func.getTag(ip)) { - .func_decl => return getCoercedFuncDecl(ip, gpa, tid, val, new_ty), - .func_instance => return getCoercedFuncInstance(ip, gpa, tid, val, new_ty), - else => unreachable, - } - }, - else => {}, + else => { + const unwrapped_val = val.unwrap(ip); + const val_item = unwrapped_val.getItem(ip); + switch (val_item.tag) { + .func_decl => return getCoercedFuncDecl(ip, gpa, tid, val, new_ty), + .func_instance => return getCoercedFuncInstance(ip, gpa, tid, val, new_ty), + .func_coerced => { + const func: Index = @enumFromInt(unwrapped_val.getExtra(ip).view().items(.@"0")[ + val_item.data + std.meta.fieldIndex(Tag.FuncCoerced, "func").? + ]); + switch (func.unwrap(ip).getTag(ip)) { + .func_decl => return getCoercedFuncDecl(ip, gpa, tid, val, new_ty), + .func_instance => return getCoercedFuncInstance(ip, gpa, tid, val, new_ty), + else => unreachable, + } + }, + else => {}, + } }, } @@ -8712,9 +8880,10 @@ fn getCoercedFuncDecl( val: Index, new_ty: Index, ) Allocator.Error!Index { - const prev_ty: Index = @enumFromInt( - ip.extra.items[val.getData(ip) + std.meta.fieldIndex(Tag.FuncDecl, "ty").?], - ); + const unwrapped_val = val.unwrap(ip); + const prev_ty: Index = @enumFromInt(unwrapped_val.getExtra(ip).view().items(.@"0")[ + unwrapped_val.getData(ip) + std.meta.fieldIndex(Tag.FuncDecl, "ty").? 
+ ]); if (new_ty == prev_ty) return val; return getCoercedFunc(ip, gpa, tid, val, new_ty); } @@ -8726,9 +8895,10 @@ fn getCoercedFuncInstance( val: Index, new_ty: Index, ) Allocator.Error!Index { - const prev_ty: Index = @enumFromInt( - ip.extra.items[val.getData(ip) + std.meta.fieldIndex(Tag.FuncInstance, "ty").?], - ); + const unwrapped_val = val.unwrap(ip); + const prev_ty: Index = @enumFromInt(unwrapped_val.getExtra(ip).view().items(.@"0")[ + unwrapped_val.getData(ip) + std.meta.fieldIndex(Tag.FuncInstance, "ty").? + ]); if (new_ty == prev_ty) return val; return getCoercedFunc(ip, gpa, tid, val, new_ty); } @@ -8740,24 +8910,26 @@ fn getCoercedFunc( func: Index, ty: Index, ) Allocator.Error!Index { - const prev_extra_len = ip.extra.items.len; - try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.FuncCoerced).Struct.fields.len); - - const items = ip.getLocal(tid).getMutableItems(gpa); + const local = ip.getLocal(tid); + const items = local.getMutableItems(gpa); try items.ensureUnusedCapacity(1); + const extra = local.getMutableExtra(gpa); - const extra_index = ip.addExtraAssumeCapacity(Tag.FuncCoerced{ + const prev_extra_len = extra.mutate.len; + try extra.ensureUnusedCapacity(@typeInfo(Tag.FuncCoerced).Struct.fields.len); + + const extra_index = addExtraAssumeCapacity(extra, Tag.FuncCoerced{ .ty = ty, .func = func, }); - errdefer ip.extra.items.len = prev_extra_len; + errdefer extra.mutate.len = prev_extra_len; var gop = try ip.getOrPutKey(gpa, tid, .{ - .func = extraFuncCoerced(ip, extra_index), + .func = ip.extraFuncCoerced(extra.list.*, extra_index), }); defer gop.deinit(); if (gop == .existing) { - ip.extra.items.len = prev_extra_len; + extra.mutate.len = prev_extra_len; return gop.existing; } @@ -8771,34 +8943,17 @@ fn getCoercedFunc( /// Asserts `val` has an integer type. /// Assumes `new_ty` is an integer type. pub fn getCoercedInts(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, int: Key.Int, new_ty: Index) Allocator.Error!Index { - // The key cannot be passed directly to `get`, otherwise in the case of - // big_int storage, the limbs would be invalidated before they are read. - // Here we pre-reserve the limbs to ensure that the logic in `addInt` will - // not use an invalidated limbs pointer. - const new_storage: Key.Int.Storage = switch (int.storage) { - .u64, .i64, .lazy_align, .lazy_size => int.storage, - .big_int => |big_int| storage: { - const positive = big_int.positive; - const limbs = ip.limbsSliceToIndex(big_int.limbs); - // This line invalidates the limbs slice, but the indexes computed in the - // previous line are still correct. 
- try reserveLimbs(ip, gpa, @typeInfo(Int).Struct.fields.len + big_int.limbs.len); - break :storage .{ .big_int = .{ - .limbs = ip.limbsIndexToSlice(limbs), - .positive = positive, - } }; - }, - }; return ip.get(gpa, tid, .{ .int = .{ .ty = new_ty, - .storage = new_storage, + .storage = int.storage, } }); } pub fn indexToFuncType(ip: *const InternPool, val: Index) ?Key.FuncType { - const item = val.getItem(ip); + const unwrapped_val = val.unwrap(ip); + const item = unwrapped_val.getItem(ip); switch (item.tag) { - .type_function => return extraFuncType(ip, item.data), + .type_function => return extraFuncType(unwrapped_val.tid, unwrapped_val.getExtra(ip), item.data), else => return null, } } @@ -8819,7 +8974,7 @@ pub fn isIntegerType(ip: *const InternPool, ty: Index) bool { .c_ulonglong_type, .comptime_int_type, => true, - else => switch (ty.getTag(ip)) { + else => switch (ty.unwrap(ip).getTag(ip)) { .type_int_signed, .type_int_unsigned, => true, @@ -8895,9 +9050,11 @@ pub fn errorUnionPayload(ip: *const InternPool, ty: Index) Index { /// The is only legal because the initializer is not part of the hash. pub fn mutateVarInit(ip: *InternPool, index: Index, init_index: Index) void { - const item = index.getItem(ip); + const unwrapped_index = index.unwrap(ip); + const extra_list = unwrapped_index.getExtra(ip); + const item = unwrapped_index.getItem(ip); assert(item.tag == .variable); - ip.extra.items[item.data + std.meta.fieldIndex(Tag.Variable, "init").?] = @intFromEnum(init_index); + @atomicStore(u32, &extra_list.view().items(.@"0")[item.data + std.meta.fieldIndex(Tag.Variable, "init").?], @intFromEnum(init_index), .release); } pub fn dump(ip: *const InternPool) void { @@ -8907,12 +9064,16 @@ pub fn dump(ip: *const InternPool) void { fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { var items_len: usize = 0; + var extra_len: usize = 0; + var limbs_len: usize = 0; for (ip.locals) |*local| { items_len += local.mutate.items.len; + extra_len += local.mutate.extra.len; + limbs_len += local.mutate.limbs.len; } const items_size = (1 + 4) * items_len; - const extra_size = 4 * ip.extra.items.len; - const limbs_size = 8 * ip.limbs.items.len; + const extra_size = 4 * extra_len; + const limbs_size = 8 * limbs_len; const decls_size = ip.allocated_decls.len * @sizeOf(Module.Decl); // TODO: map overhead size is not taken into account @@ -8929,9 +9090,9 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { total_size, items_len, items_size, - ip.extra.items.len, + extra_len, extra_size, - ip.limbs.items.len, + limbs_len, limbs_size, ip.allocated_decls.len, decls_size, @@ -8943,7 +9104,9 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { }; var counts = std.AutoArrayHashMap(Tag, TagStats).init(arena); for (ip.locals) |*local| { - const items = local.shared.items.view(); + const items = local.shared.items.view().slice(); + const extra_list = local.shared.extra; + const extra_items = extra_list.view().items(.@"0"); for ( items.items(.tag)[0..local.mutate.items.len], items.items(.data)[0..local.mutate.items.len], @@ -8968,12 +9131,12 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { .type_error_union => @sizeOf(Key.ErrorUnionType), .type_anyerror_union => 0, .type_error_set => b: { - const info = ip.extraData(Tag.ErrorSet, data); + const info = extraData(extra_list, Tag.ErrorSet, data); break :b @sizeOf(Tag.ErrorSet) + (@sizeOf(u32) * info.names_len); }, .type_inferred_error_set => 0, 
.type_enum_explicit, .type_enum_nonexhaustive => b: { - const info = ip.extraData(EnumExplicit, data); + const info = extraData(extra_list, EnumExplicit, data); var ints = @typeInfo(EnumExplicit).Struct.fields.len; if (info.zir_index == .none) ints += 1; ints += if (info.captures_len != std.math.maxInt(u32)) @@ -8985,22 +9148,22 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { break :b @sizeOf(u32) * ints; }, .type_enum_auto => b: { - const info = ip.extraData(EnumAuto, data); + const info = extraData(extra_list, EnumAuto, data); const ints = @typeInfo(EnumAuto).Struct.fields.len + info.captures_len + info.fields_len; break :b @sizeOf(u32) * ints; }, .type_opaque => b: { - const info = ip.extraData(Tag.TypeOpaque, data); + const info = extraData(extra_list, Tag.TypeOpaque, data); const ints = @typeInfo(Tag.TypeOpaque).Struct.fields.len + info.captures_len; break :b @sizeOf(u32) * ints; }, .type_struct => b: { if (data == 0) break :b 0; - const extra = ip.extraDataTrail(Tag.TypeStruct, data); + const extra = extraDataTrail(extra_list, Tag.TypeStruct, data); const info = extra.data; var ints: usize = @typeInfo(Tag.TypeStruct).Struct.fields.len; if (info.flags.any_captures) { - const captures_len = ip.extra.items[extra.end]; + const captures_len = extra_items[extra.end]; ints += 1 + captures_len; } ints += info.fields_len; // types @@ -9021,13 +9184,13 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { break :b @sizeOf(u32) * ints; }, .type_struct_anon => b: { - const info = ip.extraData(TypeStructAnon, data); + const info = extraData(extra_list, TypeStructAnon, data); break :b @sizeOf(TypeStructAnon) + (@sizeOf(u32) * 3 * info.fields_len); }, .type_struct_packed => b: { - const extra = ip.extraDataTrail(Tag.TypeStructPacked, data); + const extra = extraDataTrail(extra_list, Tag.TypeStructPacked, data); const captures_len = if (extra.data.flags.any_captures) - ip.extra.items[extra.end] + extra_items[extra.end] else 0; break :b @sizeOf(u32) * (@typeInfo(Tag.TypeStructPacked).Struct.fields.len + @@ -9035,9 +9198,9 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { extra.data.fields_len * 2); }, .type_struct_packed_inits => b: { - const extra = ip.extraDataTrail(Tag.TypeStructPacked, data); + const extra = extraDataTrail(extra_list, Tag.TypeStructPacked, data); const captures_len = if (extra.data.flags.any_captures) - ip.extra.items[extra.end] + extra_items[extra.end] else 0; break :b @sizeOf(u32) * (@typeInfo(Tag.TypeStructPacked).Struct.fields.len + @@ -9045,14 +9208,14 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { extra.data.fields_len * 3); }, .type_tuple_anon => b: { - const info = ip.extraData(TypeStructAnon, data); + const info = extraData(extra_list, TypeStructAnon, data); break :b @sizeOf(TypeStructAnon) + (@sizeOf(u32) * 2 * info.fields_len); }, .type_union => b: { - const extra = ip.extraDataTrail(Tag.TypeUnion, data); + const extra = extraDataTrail(extra_list, Tag.TypeUnion, data); const captures_len = if (extra.data.flags.any_captures) - ip.extra.items[extra.end] + extra_items[extra.end] else 0; const per_field = @sizeOf(u32); // field type @@ -9067,7 +9230,7 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { }, .type_function => b: { - const info = ip.extraData(Tag.TypeFunction, data); + const info = extraData(extra_list, Tag.TypeFunction, data); break :b @sizeOf(Tag.TypeFunction) + (@sizeOf(Index) * info.params_len) + 
(@as(u32, 4) * @intFromBool(info.flags.has_comptime_bits)) + @@ -9102,8 +9265,9 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { .int_positive, .int_negative, => b: { - const int = ip.limbData(Int, data); - break :b @sizeOf(Int) + int.limbs_len * 8; + const limbs_list = local.shared.getLimbs(); + const int: Int = @bitCast(limbs_list.view().items(.@"0")[data..][0..Int.limbs_items_len].*); + break :b @sizeOf(Int) + int.limbs_len * @sizeOf(Limb); }, .int_lazy_align, .int_lazy_size => @sizeOf(IntLazy), @@ -9114,12 +9278,12 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { .enum_tag => @sizeOf(Tag.EnumTag), .bytes => b: { - const info = ip.extraData(Bytes, data); + const info = extraData(extra_list, Bytes, data); const len: usize = @intCast(ip.aggregateTypeLenIncludingSentinel(info.ty)); break :b @sizeOf(Bytes) + len + @intFromBool(info.bytes.at(len - 1, ip) != 0); }, .aggregate => b: { - const info = ip.extraData(Tag.Aggregate, data); + const info = extraData(extra_list, Tag.Aggregate, data); const fields_len: u32 = @intCast(ip.aggregateTypeLenIncludingSentinel(info.ty)); break :b @sizeOf(Tag.Aggregate) + (@sizeOf(Index) * fields_len); }, @@ -9137,7 +9301,7 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { .extern_func => @sizeOf(Tag.ExternFunc), .func_decl => @sizeOf(Tag.FuncDecl), .func_instance => b: { - const info = ip.extraData(Tag.FuncInstance, data); + const info = extraData(extra_list, Tag.FuncInstance, data); const ty = ip.typeOf(info.generic_owner); const params_len = ip.indexToKey(ty).func_type.param_types.len; break :b @sizeOf(Tag.FuncInstance) + @sizeOf(Index) * params_len; @@ -9147,7 +9311,7 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { .union_value => @sizeOf(Key.Union), .memoized_call => b: { - const info = ip.extraData(MemoizedCall, data); + const info = extraData(extra_list, MemoizedCall, data); break :b @sizeOf(MemoizedCall) + (@sizeOf(Index) * info.args_len); }, }); @@ -9287,14 +9451,15 @@ pub fn dumpGenericInstancesFallible(ip: *const InternPool, allocator: Allocator) var instances: std.AutoArrayHashMapUnmanaged(Index, std.ArrayListUnmanaged(Index)) = .{}; for (ip.locals, 0..) 
|*local, tid| { - const items = local.shared.items.view(); + const items = local.shared.items.view().slice(); + const extra_list = local.shared.extra; for ( items.items(.tag)[0..local.mutate.items.len], items.items(.data)[0..local.mutate.items.len], 0.., ) |tag, data, index| { if (tag != .func_instance) continue; - const info = ip.extraData(Tag.FuncInstance, data); + const info = extraData(extra_list, Tag.FuncInstance, data); const gop = try instances.getOrPut(arena, info.generic_owner); if (!gop.found_existing) gop.value_ptr.* = .{}; @@ -9319,7 +9484,8 @@ pub fn dumpGenericInstancesFallible(ip: *const InternPool, allocator: Allocator) const generic_fn_owner_decl = ip.declPtrConst(ip.funcDeclOwner(entry.key_ptr.*)); try w.print("{} ({}): \n", .{ generic_fn_owner_decl.name.fmt(ip), entry.value_ptr.items.len }); for (entry.value_ptr.items) |index| { - const func = ip.extraFuncInstance(index.getData(ip)); + const unwrapped_index = index.unwrap(ip); + const func = ip.extraFuncInstance(unwrapped_index.tid, unwrapped_index.getExtra(ip), unwrapped_index.getData(ip)); const owner_decl = ip.declPtrConst(func.owner_decl); try w.print(" {}: (", .{owner_decl.name.fmt(ip)}); for (func.comptime_args.get(ip)) |arg| { @@ -9465,9 +9631,9 @@ pub fn getOrPutTrailingString( comptime embedded_nulls: EmbeddedNulls, ) Allocator.Error!embedded_nulls.StringType() { const strings = ip.getLocal(tid).getMutableStrings(gpa); - const start: u32 = @intCast(strings.lenPtr().* - len); - if (len > 0 and strings.view().items(.@"0")[strings.lenPtr().* - 1] == 0) { - strings.lenPtr().* -= 1; + const start: u32 = @intCast(strings.mutate.len - len); + if (len > 0 and strings.view().items(.@"0")[strings.mutate.len - 1] == 0) { + strings.mutate.len -= 1; } else { try strings.ensureUnusedCapacity(1); } @@ -9674,105 +9840,112 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index { // This optimization on tags is needed so that indexToKey can call // typeOf without being recursive. 
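Throughout these hunks, a field's slot inside a flattened payload is computed as `item.data + std.meta.fieldIndex(T, "field").?`, which works precisely because the serialization stores exactly one `u32` per field, in declaration order. A tiny self-contained illustration (the `Variable` struct here is invented, not the real `Tag.Variable`):

    const std = @import("std");

    const Variable = struct {
        ty: u32,
        init: u32,
        decl: u32,
    };

    test "fieldIndex addresses a slot in a flattened payload" {
        // One u32 per field, starting at some base offset into `extra`.
        const extra = [_]u32{ 0xAA, 0xBB, 0xCC };
        const base: usize = 0;
        const init_slot = base + std.meta.fieldIndex(Variable, "init").?;
        try std.testing.expectEqual(@as(u32, 0xBB), extra[init_slot]);
    }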
- _ => switch (index.getTag(ip)) { - .removed => unreachable, + _ => { + const unwrapped_index = index.unwrap(ip); + const item = unwrapped_index.getItem(ip); + return switch (item.tag) { + .removed => unreachable, - .type_int_signed, - .type_int_unsigned, - .type_array_big, - .type_array_small, - .type_vector, - .type_pointer, - .type_slice, - .type_optional, - .type_anyframe, - .type_error_union, - .type_anyerror_union, - .type_error_set, - .type_inferred_error_set, - .type_enum_auto, - .type_enum_explicit, - .type_enum_nonexhaustive, - .type_opaque, - .type_struct, - .type_struct_anon, - .type_struct_packed, - .type_struct_packed_inits, - .type_tuple_anon, - .type_union, - .type_function, - => .type_type, + .type_int_signed, + .type_int_unsigned, + .type_array_big, + .type_array_small, + .type_vector, + .type_pointer, + .type_slice, + .type_optional, + .type_anyframe, + .type_error_union, + .type_anyerror_union, + .type_error_set, + .type_inferred_error_set, + .type_enum_auto, + .type_enum_explicit, + .type_enum_nonexhaustive, + .type_opaque, + .type_struct, + .type_struct_anon, + .type_struct_packed, + .type_struct_packed_inits, + .type_tuple_anon, + .type_union, + .type_function, + => .type_type, - .undef, - .opt_null, - .only_possible_value, - => @enumFromInt(index.getData(ip)), + .undef, + .opt_null, + .only_possible_value, + => @enumFromInt(item.data), - .simple_type, .simple_value => unreachable, // handled via Index above + .simple_type, .simple_value => unreachable, // handled via Index above - inline .ptr_decl, - .ptr_comptime_alloc, - .ptr_anon_decl, - .ptr_anon_decl_aligned, - .ptr_comptime_field, - .ptr_int, - .ptr_eu_payload, - .ptr_opt_payload, - .ptr_elem, - .ptr_field, - .ptr_slice, - .opt_payload, - .error_union_payload, - .int_small, - .int_lazy_align, - .int_lazy_size, - .error_set_error, - .error_union_error, - .enum_tag, - .variable, - .extern_func, - .func_decl, - .func_instance, - .func_coerced, - .union_value, - .bytes, - .aggregate, - .repeated, - => |t| { - const extra_index = index.getData(ip); - const field_index = std.meta.fieldIndex(t.Payload(), "ty").?; - return @enumFromInt(ip.extra.items[extra_index + field_index]); - }, + inline .ptr_decl, + .ptr_comptime_alloc, + .ptr_anon_decl, + .ptr_anon_decl_aligned, + .ptr_comptime_field, + .ptr_int, + .ptr_eu_payload, + .ptr_opt_payload, + .ptr_elem, + .ptr_field, + .ptr_slice, + .opt_payload, + .error_union_payload, + .int_small, + .int_lazy_align, + .int_lazy_size, + .error_set_error, + .error_union_error, + .enum_tag, + .variable, + .extern_func, + .func_decl, + .func_instance, + .func_coerced, + .union_value, + .bytes, + .aggregate, + .repeated, + => |t| { + const extra_list = unwrapped_index.getExtra(ip); + return @enumFromInt(extra_list.view().items(.@"0")[item.data + std.meta.fieldIndex(t.Payload(), "ty").?]); + }, - .int_u8 => .u8_type, - .int_u16 => .u16_type, - .int_u32 => .u32_type, - .int_i32 => .i32_type, - .int_usize => .usize_type, + .int_u8 => .u8_type, + .int_u16 => .u16_type, + .int_u32 => .u32_type, + .int_i32 => .i32_type, + .int_usize => .usize_type, - .int_comptime_int_u32, - .int_comptime_int_i32, - => .comptime_int_type, + .int_comptime_int_u32, + .int_comptime_int_i32, + => .comptime_int_type, - // Note these are stored in limbs data, not extra data. - .int_positive, - .int_negative, - => ip.limbData(Int, index.getData(ip)).ty, + // Note these are stored in limbs data, not extra data. 
+ .int_positive, + .int_negative, + => { + const limbs_list = ip.getLocalShared(unwrapped_index.tid).getLimbs(); + const int: Int = @bitCast(limbs_list.view().items(.@"0")[item.data..][0..Int.limbs_items_len].*); + return int.ty; + }, - .enum_literal => .enum_literal_type, - .float_f16 => .f16_type, - .float_f32 => .f32_type, - .float_f64 => .f64_type, - .float_f80 => .f80_type, - .float_f128 => .f128_type, + .enum_literal => .enum_literal_type, + .float_f16 => .f16_type, + .float_f32 => .f32_type, + .float_f64 => .f64_type, + .float_f80 => .f80_type, + .float_f128 => .f128_type, - .float_c_longdouble_f80, - .float_c_longdouble_f128, - => .c_longdouble_type, + .float_c_longdouble_f80, + .float_c_longdouble_f128, + => .c_longdouble_type, - .float_comptime_float => .comptime_float_type, + .float_comptime_float => .comptime_float_type, - .memoized_call => unreachable, + .memoized_call => unreachable, + }; }, .none => unreachable, @@ -9806,54 +9979,67 @@ pub fn aggregateTypeLenIncludingSentinel(ip: *const InternPool, ty: Index) u64 { } pub fn funcTypeReturnType(ip: *const InternPool, ty: Index) Index { - const item = ty.getItem(ip); - const child_item = switch (item.tag) { - .type_pointer => @as(Index, @enumFromInt(ip.extra.items[ - item.data + std.meta.fieldIndex(Tag.TypePointer, "child").? - ])).getItem(ip), - .type_function => item, + const unwrapped_ty = ty.unwrap(ip); + const ty_extra = unwrapped_ty.getExtra(ip); + const ty_item = unwrapped_ty.getItem(ip); + const child_extra, const child_item = switch (ty_item.tag) { + .type_pointer => child: { + const child_index: Index = @enumFromInt(ty_extra.view().items(.@"0")[ + ty_item.data + std.meta.fieldIndex(Tag.TypePointer, "child").? + ]); + const unwrapped_child = child_index.unwrap(ip); + break :child .{ unwrapped_child.getExtra(ip), unwrapped_child.getItem(ip) }; + }, + .type_function => .{ ty_extra, ty_item }, else => unreachable, }; assert(child_item.tag == .type_function); - return @enumFromInt(ip.extra.items[ + return @enumFromInt(child_extra.view().items(.@"0")[ child_item.data + std.meta.fieldIndex(Tag.TypeFunction, "return_type").? ]); } pub fn isNoReturn(ip: *const InternPool, ty: Index) bool { - return switch (ty) { - .noreturn_type => true, - else => switch (ty.getTag(ip)) { - .type_error_set => ip.extra.items[ty.getData(ip) + std.meta.fieldIndex(Tag.ErrorSet, "names_len").?] == 0, - else => false, + switch (ty) { + .noreturn_type => return true, + else => { + const unwrapped_ty = ty.unwrap(ip); + const ty_item = unwrapped_ty.getItem(ip); + return switch (ty_item.tag) { + .type_error_set => unwrapped_ty.getExtra(ip).view().items(.@"0")[ty_item.data + std.meta.fieldIndex(Tag.ErrorSet, "names_len").?] == 0, + else => false, + }; }, - }; + } } pub fn isUndef(ip: *const InternPool, val: Index) bool { - return val == .undef or val.getTag(ip) == .undef; + return val == .undef or val.unwrap(ip).getTag(ip) == .undef; } pub fn isVariable(ip: *const InternPool, val: Index) bool { - return val.getTag(ip) == .variable; + return val.unwrap(ip).getTag(ip) == .variable; } pub fn getBackingDecl(ip: *const InternPool, val: Index) OptionalDeclIndex { var base = val; while (true) { - switch (base.getTag(ip)) { - .ptr_decl => return @enumFromInt(ip.extra.items[ - base.getData(ip) + std.meta.fieldIndex(PtrDecl, "decl").? 
+ const unwrapped_base = base.unwrap(ip); + const base_item = unwrapped_base.getItem(ip); + const base_extra_items = unwrapped_base.getExtra(ip).view().items(.@"0"); + switch (base_item.tag) { + .ptr_decl => return @enumFromInt(base_extra_items[ + base_item.data + std.meta.fieldIndex(PtrDecl, "decl").? ]), inline .ptr_eu_payload, .ptr_opt_payload, .ptr_elem, .ptr_field, - => |tag| base = @enumFromInt(ip.extra.items[ - base.getData(ip) + std.meta.fieldIndex(tag.Payload(), "base").? + => |tag| base = @enumFromInt(base_extra_items[ + base_item.data + std.meta.fieldIndex(tag.Payload(), "base").? ]), - .ptr_slice => base = @enumFromInt(ip.extra.items[ - base.getData(ip) + std.meta.fieldIndex(PtrSlice, "ptr").? + .ptr_slice => base = @enumFromInt(base_extra_items[ + base_item.data + std.meta.fieldIndex(PtrSlice, "ptr").? ]), else => return .none, } @@ -9863,7 +10049,9 @@ pub fn getBackingDecl(ip: *const InternPool, val: Index) OptionalDeclIndex { pub fn getBackingAddrTag(ip: *const InternPool, val: Index) ?Key.Ptr.BaseAddr.Tag { var base = val; while (true) { - switch (base.getTag(ip)) { + const unwrapped_base = base.unwrap(ip); + const base_item = unwrapped_base.getItem(ip); + switch (base_item.tag) { .ptr_decl => return .decl, .ptr_comptime_alloc => return .comptime_alloc, .ptr_anon_decl, @@ -9875,11 +10063,11 @@ pub fn getBackingAddrTag(ip: *const InternPool, val: Index) ?Key.Ptr.BaseAddr.Ta .ptr_opt_payload, .ptr_elem, .ptr_field, - => |tag| base = @enumFromInt(ip.extra.items[ - base.getData(ip) + std.meta.fieldIndex(tag.Payload(), "base").? + => |tag| base = @enumFromInt(unwrapped_base.getExtra(ip).view().items(.@"0")[ + base_item.data + std.meta.fieldIndex(tag.Payload(), "base").? ]), - inline .ptr_slice => |tag| base = @enumFromInt(ip.extra.items[ - base.getData(ip) + std.meta.fieldIndex(tag.Payload(), "ptr").? + inline .ptr_slice => |tag| base = @enumFromInt(unwrapped_base.getExtra(ip).view().items(.@"0")[ + base_item.data + std.meta.fieldIndex(tag.Payload(), "ptr").? 
]), else => return null, } @@ -9989,7 +10177,7 @@ pub fn zigTypeTagOrPoison(ip: *const InternPool, index: Index) error{GenericPois .empty_struct => unreachable, .generic_poison => unreachable, - _ => switch (index.getTag(ip)) { + _ => switch (index.unwrap(ip).getTag(ip)) { .removed => unreachable, .type_int_signed, @@ -10097,30 +10285,35 @@ pub fn zigTypeTagOrPoison(ip: *const InternPool, index: Index) error{GenericPois } pub fn isFuncBody(ip: *const InternPool, index: Index) bool { - return switch (index.getTag(ip)) { + return switch (index.unwrap(ip).getTag(ip)) { .func_decl, .func_instance, .func_coerced => true, else => false, }; } pub fn funcAnalysis(ip: *const InternPool, index: Index) *FuncAnalysis { - const item = index.getItem(ip); + const unwrapped_index = index.unwrap(ip); + const extra = unwrapped_index.getExtra(ip); + const item = unwrapped_index.getItem(ip); const extra_index = switch (item.tag) { .func_decl => item.data + std.meta.fieldIndex(Tag.FuncDecl, "analysis").?, .func_instance => item.data + std.meta.fieldIndex(Tag.FuncInstance, "analysis").?, - .func_coerced => i: { + .func_coerced => { const extra_index = item.data + std.meta.fieldIndex(Tag.FuncCoerced, "func").?; - const func_index: Index = @enumFromInt(ip.extra.items[extra_index]); - const sub_item = func_index.getItem(ip); - break :i switch (sub_item.tag) { - .func_decl => sub_item.data + std.meta.fieldIndex(Tag.FuncDecl, "analysis").?, - .func_instance => sub_item.data + std.meta.fieldIndex(Tag.FuncInstance, "analysis").?, - else => unreachable, - }; + const func_index: Index = @enumFromInt(extra.view().items(.@"0")[extra_index]); + const unwrapped_func = func_index.unwrap(ip); + const func_item = unwrapped_func.getItem(ip); + return @ptrCast(&unwrapped_func.getExtra(ip).view().items(.@"0")[ + switch (func_item.tag) { + .func_decl => func_item.data + std.meta.fieldIndex(Tag.FuncDecl, "analysis").?, + .func_instance => func_item.data + std.meta.fieldIndex(Tag.FuncInstance, "analysis").?, + else => unreachable, + } + ]); }, else => unreachable, }; - return @ptrCast(&ip.extra.items[extra_index]); + return @ptrCast(&extra.view().items(.@"0")[extra_index]); } pub fn funcHasInferredErrorSet(ip: *const InternPool, i: Index) bool { @@ -10128,33 +10321,36 @@ pub fn funcHasInferredErrorSet(ip: *const InternPool, i: Index) bool { } pub fn funcZirBodyInst(ip: *const InternPool, index: Index) TrackedInst.Index { - const item = index.getItem(ip); + const unwrapped_index = index.unwrap(ip); + const item = unwrapped_index.getItem(ip); + const item_extra = unwrapped_index.getExtra(ip); const zir_body_inst_field_index = std.meta.fieldIndex(Tag.FuncDecl, "zir_body_inst").?; - const extra_index = switch (item.tag) { - .func_decl => item.data + zir_body_inst_field_index, - .func_instance => ei: { + switch (item.tag) { + .func_decl => return @enumFromInt(item_extra.view().items(.@"0")[item.data + zir_body_inst_field_index]), + .func_instance => { const generic_owner_field_index = std.meta.fieldIndex(Tag.FuncInstance, "generic_owner").?; - const func_decl_index: Index = @enumFromInt(ip.extra.items[item.data + generic_owner_field_index]); - const func_decl_item = func_decl_index.getItem(ip); + const func_decl_index: Index = @enumFromInt(item_extra.view().items(.@"0")[item.data + generic_owner_field_index]); + const unwrapped_func_decl = func_decl_index.unwrap(ip); + const func_decl_item = unwrapped_func_decl.getItem(ip); + const func_decl_extra = unwrapped_func_decl.getExtra(ip); assert(func_decl_item.tag == .func_decl); - break :ei 
func_decl_item.data + zir_body_inst_field_index; + return @enumFromInt(func_decl_extra.view().items(.@"0")[func_decl_item.data + zir_body_inst_field_index]); }, .func_coerced => { - const uncoerced_func_index: Index = @enumFromInt(ip.extra.items[ + const uncoerced_func_index: Index = @enumFromInt(item_extra.view().items(.@"0")[ item.data + std.meta.fieldIndex(Tag.FuncCoerced, "func").? ]); return ip.funcZirBodyInst(uncoerced_func_index); }, else => unreachable, - }; - return @enumFromInt(ip.extra.items[extra_index]); + } } pub fn iesFuncIndex(ip: *const InternPool, ies_index: Index) Index { - const item = ies_index.getItem(ip); + const item = ies_index.unwrap(ip).getItem(ip); assert(item.tag == .type_inferred_error_set); const func_index: Index = @enumFromInt(item.data); - switch (func_index.getTag(ip)) { + switch (func_index.unwrap(ip).getTag(ip)) { .func_decl, .func_instance => {}, else => unreachable, // assertion failed } @@ -10175,30 +10371,36 @@ pub fn iesResolved(ip: *const InternPool, ies_index: Index) *Index { /// added to `ip`. pub fn funcIesResolved(ip: *const InternPool, func_index: Index) *Index { assert(funcHasInferredErrorSet(ip, func_index)); - const func_item = func_index.getItem(ip); + const unwrapped_func = func_index.unwrap(ip); + const func_extra = unwrapped_func.getExtra(ip); + const func_item = unwrapped_func.getItem(ip); const extra_index = switch (func_item.tag) { .func_decl => func_item.data + @typeInfo(Tag.FuncDecl).Struct.fields.len, .func_instance => func_item.data + @typeInfo(Tag.FuncInstance).Struct.fields.len, - .func_coerced => i: { - const uncoerced_func_index: Index = @enumFromInt(ip.extra.items[ + .func_coerced => { + const uncoerced_func_index: Index = @enumFromInt(func_extra.view().items(.@"0")[ func_item.data + std.meta.fieldIndex(Tag.FuncCoerced, "func").? 
]); - const uncoerced_func_item = uncoerced_func_index.getItem(ip); - break :i switch (uncoerced_func_item.tag) { - .func_decl => uncoerced_func_item.data + @typeInfo(Tag.FuncDecl).Struct.fields.len, - .func_instance => uncoerced_func_item.data + @typeInfo(Tag.FuncInstance).Struct.fields.len, - else => unreachable, - }; + const unwrapped_uncoerced_func = uncoerced_func_index.unwrap(ip); + const uncoerced_func_item = unwrapped_uncoerced_func.getItem(ip); + return @ptrCast(&unwrapped_uncoerced_func.getExtra(ip).view().items(.@"0")[ + switch (uncoerced_func_item.tag) { + .func_decl => uncoerced_func_item.data + @typeInfo(Tag.FuncDecl).Struct.fields.len, + .func_instance => uncoerced_func_item.data + @typeInfo(Tag.FuncInstance).Struct.fields.len, + else => unreachable, + } + ]); }, else => unreachable, }; - return @ptrCast(&ip.extra.items[extra_index]); + return @ptrCast(&func_extra.view().items(.@"0")[extra_index]); } pub fn funcDeclInfo(ip: *const InternPool, index: Index) Key.Func { - const item = index.getItem(ip); + const unwrapped_index = index.unwrap(ip); + const item = unwrapped_index.getItem(ip); assert(item.tag == .func_decl); - return extraFuncDecl(ip, item.data); + return extraFuncDecl(unwrapped_index.tid, unwrapped_index.getExtra(ip), item.data); } pub fn funcDeclOwner(ip: *const InternPool, index: Index) DeclIndex { @@ -10206,15 +10408,19 @@ pub fn funcDeclOwner(ip: *const InternPool, index: Index) DeclIndex { } pub fn funcTypeParamsLen(ip: *const InternPool, index: Index) u32 { - const item = index.getItem(ip); + const unwrapped_index = index.unwrap(ip); + const extra_list = unwrapped_index.getExtra(ip); + const item = unwrapped_index.getItem(ip); assert(item.tag == .type_function); - return ip.extra.items[item.data + std.meta.fieldIndex(Tag.TypeFunction, "params_len").?]; + return extra_list.view().items(.@"0")[item.data + std.meta.fieldIndex(Tag.TypeFunction, "params_len").?]; } pub fn unwrapCoercedFunc(ip: *const InternPool, index: Index) Index { - return switch (index.getTag(ip)) { - .func_coerced => @enumFromInt(ip.extra.items[ - index.getData(ip) + std.meta.fieldIndex(Tag.FuncCoerced, "func").? + const unwrapped_index = index.unwrap(ip); + const item = unwrapped_index.getItem(ip); + return switch (item.tag) { + .func_coerced => @enumFromInt(unwrapped_index.getExtra(ip).view().items(.@"0")[ + item.data + std.meta.fieldIndex(Tag.FuncCoerced, "func").? ]), .func_instance, .func_decl => index, else => unreachable, @@ -10241,11 +10447,11 @@ pub fn resolveBuiltinType( (ip.zigTypeTagOrPoison(resolved_index) catch unreachable)); // Copy the data - const item = resolved_index.getItem(ip); - const unwrapped = want_index.unwrap(ip); - var items = ip.getLocalShared(unwrapped.tid).items.view().slice(); - items.items(.data)[unwrapped.index] = item.data; - @atomicStore(Tag, &items.items(.tag)[unwrapped.index], item.tag, .release); + const item = resolved_index.unwrap(ip).getItem(ip); + const unwrapped_index = want_index.unwrap(ip); + var items = ip.getLocalShared(unwrapped_index.tid).items.acquire().view().slice(); + items.items(.data)[unwrapped_index.index] = item.data; + @atomicStore(Tag, &items.items(.tag)[unwrapped_index.index], item.tag, .release); ip.remove(tid, resolved_index); } @@ -10268,17 +10474,19 @@ pub fn structDecl(ip: *const InternPool, i: Index) OptionalDeclIndex { /// Returns the already-existing field with the same name, if any. 
pub fn addFieldName( ip: *InternPool, + extra: Local.Extra, names_map: MapIndex, names_start: u32, name: NullTerminatedString, ) ?u32 { + const extra_items = extra.view().items(.@"0"); const map = &ip.maps.items[@intFromEnum(names_map)]; const field_index = map.count(); - const strings = ip.extra.items[names_start..][0..field_index]; + const strings = extra_items[names_start..][0..field_index]; const adapter: NullTerminatedString.Adapter = .{ .strings = @ptrCast(strings) }; const gop = map.getOrPutAssumeCapacityAdapted(name, adapter); if (gop.found_existing) return @intCast(gop.index); - ip.extra.items[names_start + field_index] = @intFromEnum(name); + extra_items[names_start + field_index] = @intFromEnum(name); return null; } diff --git a/src/Sema.zig b/src/Sema.zig index 34db457955..f1c61fdd2a 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -36925,7 +36925,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .none, => unreachable, - _ => switch (ty.toIntern().getTag(ip)) { + _ => switch (ty.toIntern().unwrap(ip).getTag(ip)) { .removed => unreachable, .type_int_signed, // i0 handled above diff --git a/src/Type.zig b/src/Type.zig index ba53535d40..57ac2310d5 100644 --- a/src/Type.zig +++ b/src/Type.zig @@ -3686,7 +3686,7 @@ pub fn resolveFields(ty: Type, pt: Zcu.PerThread) SemaError!void { .empty_struct => unreachable, .generic_poison => unreachable, - else => switch (ty_ip.getTag(ip)) { + else => switch (ty_ip.unwrap(ip).getTag(ip)) { .type_struct, .type_struct_packed, .type_struct_packed_inits, diff --git a/src/Value.zig b/src/Value.zig index c3e4b05fcb..f114a2c7fa 100644 --- a/src/Value.zig +++ b/src/Value.zig @@ -110,14 +110,13 @@ fn arrayToIpString(val: Value, len_u64: u64, pt: Zcu.PerThread) !InternPool.Null const ip = &mod.intern_pool; const len: u32 = @intCast(len_u64); const strings = ip.getLocal(pt.tid).getMutableStrings(gpa); - const strings_len = strings.lenPtr(); try strings.ensureUnusedCapacity(len); for (0..len) |i| { // I don't think elemValue has the possibility to affect ip.string_bytes. Let's // assert just to be sure. - const prev_len = strings_len.*; + const prev_len = strings.mutate.len; const elem_val = try val.elemValue(pt, i); - assert(strings_len.* == prev_len); + assert(strings.mutate.len == prev_len); const byte: u8 = @intCast(elem_val.toUnsignedInt(pt)); strings.appendAssumeCapacity(.{byte}); } diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index 0396d06b98..b0fc35b552 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -3,7 +3,7 @@ zcu: *Zcu, /// Dense, per-thread unique index. 
tid: Id, -pub const Id = if (InternPool.single_threaded) enum { main } else enum(usize) { main, _ }; +pub const Id = if (InternPool.single_threaded) enum { main } else enum(u8) { main, _ }; pub fn astGenFile( pt: Zcu.PerThread, diff --git a/src/main.zig b/src/main.zig index 9fd9087b63..c7bbb9883c 100644 --- a/src/main.zig +++ b/src/main.zig @@ -403,6 +403,7 @@ const usage_build_generic = \\General Options: \\ -h, --help Print this help and exit \\ --color [auto|off|on] Enable or disable colored error messages + \\ -j Limit concurrent jobs (default is to use all CPU cores) \\ -femit-bin[=path] (default) Output machine code \\ -fno-emit-bin Do not output machine code \\ -femit-asm[=path] Output .s (assembly code) @@ -1004,6 +1005,7 @@ fn buildOutputType( .on else .auto; + var n_jobs: ?u32 = null; switch (arg_mode) { .build, .translate_c, .zig_test, .run => { @@ -1141,6 +1143,17 @@ fn buildOutputType( color = std.meta.stringToEnum(Color, next_arg) orelse { fatal("expected [auto|on|off] after --color, found '{s}'", .{next_arg}); }; + } else if (mem.startsWith(u8, arg, "-j")) { + const str = arg["-j".len..]; + const num = std.fmt.parseUnsigned(u32, str, 10) catch |err| { + fatal("unable to parse jobs count '{s}': {s}", .{ + str, @errorName(err), + }); + }; + if (num < 1) { + fatal("number of jobs must be at least 1\n", .{}); + } + n_jobs = num; } else if (mem.eql(u8, arg, "--subsystem")) { subsystem = try parseSubSystem(args_iter.nextOrFatal()); } else if (mem.eql(u8, arg, "-O")) { @@ -3092,7 +3105,11 @@ fn buildOutputType( defer emit_implib_resolved.deinit(); var thread_pool: ThreadPool = undefined; - try thread_pool.init(.{ .allocator = gpa, .track_ids = true }); + try thread_pool.init(.{ + .allocator = gpa, + .n_jobs = @min(@max(n_jobs orelse std.Thread.getCpuCount() catch 1, 1), std.math.maxInt(u8)), + .track_ids = true, + }); defer thread_pool.deinit(); var cleanup_local_cache_dir: ?fs.Dir = null; @@ -4644,6 +4661,7 @@ const usage_build = \\ all Print the build summary in its entirety \\ failures (Default) Only print failed steps \\ none Do not print the build summary + \\ -j Limit concurrent jobs (default is to use all CPU cores) \\ --build-file [file] Override path to build.zig \\ --cache-dir [path] Override path to local Zig cache directory \\ --global-cache-dir [path] Override path to global Zig cache directory @@ -4718,6 +4736,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void { try child_argv.append("-Z" ++ results_tmp_file_nonce); var color: Color = .auto; + var n_jobs: ?u32 = null; { var i: usize = 0; @@ -4811,6 +4830,17 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void { }; try child_argv.appendSlice(&.{ arg, args[i] }); continue; + } else if (mem.startsWith(u8, arg, "-j")) { + const str = arg["-j".len..]; + const num = std.fmt.parseUnsigned(u32, str, 10) catch |err| { + fatal("unable to parse jobs count '{s}': {s}", .{ + str, @errorName(err), + }); + }; + if (num < 1) { + fatal("number of jobs must be at least 1\n", .{}); + } + n_jobs = num; } else if (mem.eql(u8, arg, "--seed")) { if (i + 1 >= args.len) fatal("expected argument after '{s}'", .{arg}); i += 1; @@ -4895,7 +4925,11 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void { child_argv.items[argv_index_cache_dir] = local_cache_directory.path orelse cwd_path; var thread_pool: ThreadPool = undefined; - try thread_pool.init(.{ .allocator = gpa, .track_ids = true }); + try thread_pool.init(.{ + .allocator = gpa, + .n_jobs = 
@min(@max(n_jobs orelse std.Thread.getCpuCount() catch 1, 1), std.math.maxInt(u8)), + .track_ids = true, + }); defer thread_pool.deinit(); // Dummy http client that is not actually used when only_core_functionality is enabled. @@ -5329,7 +5363,11 @@ fn jitCmd( defer global_cache_directory.handle.close(); var thread_pool: ThreadPool = undefined; - try thread_pool.init(.{ .allocator = gpa, .track_ids = true }); + try thread_pool.init(.{ + .allocator = gpa, + .n_jobs = @min(@max(std.Thread.getCpuCount() catch 1, 1), std.math.maxInt(u8)), + .track_ids = true, + }); defer thread_pool.deinit(); var child_argv: std.ArrayListUnmanaged([]const u8) = .{}; -- cgit v1.2.3 From 65ced4a33436fa762de75e22a986ae08a8c0d9cc Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Mon, 8 Jul 2024 09:05:30 -0400 Subject: Compilation: put supported codegen backends on a separate thread (There are no supported backends.) --- lib/std/Progress.zig | 2 +- lib/std/Thread/Pool.zig | 23 ++++---- src/Compilation.zig | 137 +++++++++++++++++++++++++++++++++++++-------- src/Compilation/Config.zig | 8 +-- src/Zcu.zig | 14 ++--- src/Zcu/PerThread.zig | 4 +- src/target.zig | 50 ++++++++++++----- 7 files changed, 173 insertions(+), 65 deletions(-) (limited to 'src/Compilation.zig') diff --git a/lib/std/Progress.zig b/lib/std/Progress.zig index 160894aae9..2028e95dd5 100644 --- a/lib/std/Progress.zig +++ b/lib/std/Progress.zig @@ -282,7 +282,7 @@ pub const Node = struct { } fn init(free_index: Index, parent: Parent, name: []const u8, estimated_total_items: usize) Node { - assert(parent != .unused); + assert(parent == .none or @intFromEnum(parent) < node_storage_buffer_len); const storage = storageByIndex(free_index); storage.* = .{ diff --git a/lib/std/Thread/Pool.zig b/lib/std/Thread/Pool.zig index d501b66520..179f2f8521 100644 --- a/lib/std/Thread/Pool.zig +++ b/lib/std/Thread/Pool.zig @@ -21,11 +21,11 @@ const Runnable = struct { runFn: RunProto, }; -const RunProto = *const fn (*Runnable, id: ?u32) void; +const RunProto = *const fn (*Runnable, id: ?usize) void; pub const Options = struct { allocator: std.mem.Allocator, - n_jobs: ?u32 = null, + n_jobs: ?usize = null, track_ids: bool = false, }; @@ -109,7 +109,7 @@ pub fn spawnWg(pool: *Pool, wait_group: *WaitGroup, comptime func: anytype, args run_node: RunQueue.Node = .{ .data = .{ .runFn = runFn } }, wait_group: *WaitGroup, - fn runFn(runnable: *Runnable, _: ?u32) void { + fn runFn(runnable: *Runnable, _: ?usize) void { const run_node: *RunQueue.Node = @fieldParentPtr("data", runnable); const closure: *@This() = @alignCast(@fieldParentPtr("run_node", run_node)); @call(.auto, func, closure.arguments); @@ -150,7 +150,7 @@ pub fn spawnWg(pool: *Pool, wait_group: *WaitGroup, comptime func: anytype, args /// Runs `func` in the thread pool, calling `WaitGroup.start` beforehand, and /// `WaitGroup.finish` after it returns. /// -/// The first argument passed to `func` is a dense `u32` thread id, the rest +/// The first argument passed to `func` is a dense `usize` thread id, the rest /// of the arguments are passed from `args`. Requires the pool to have been /// initialized with `.track_ids = true`. 
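The `-j` plumbing above caps the worker count so that every worker fits in the `u8` thread id space introduced for `Zcu.PerThread.Id`. A small standalone check of that clamping expression (the `clampJobs` helper is invented for the example; in the real CLI a zero value is already rejected by the argument parser before this point):

    const std = @import("std");

    fn clampJobs(n_jobs: ?u32, cpu_count: u32) u32 {
        // Fall back to the detected CPU count, then clamp to [1, 255].
        return @min(@max(n_jobs orelse cpu_count, 1), std.math.maxInt(u8));
    }

    test "jobs count is clamped to the u8 thread id space" {
        try std.testing.expectEqual(@as(u32, 8), clampJobs(null, 8));
        try std.testing.expectEqual(@as(u32, 1), clampJobs(1, 8));
        try std.testing.expectEqual(@as(u32, 255), clampJobs(10_000, 8));
    }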
/// @@ -172,7 +172,7 @@ pub fn spawnWgId(pool: *Pool, wait_group: *WaitGroup, comptime func: anytype, ar run_node: RunQueue.Node = .{ .data = .{ .runFn = runFn } }, wait_group: *WaitGroup, - fn runFn(runnable: *Runnable, id: ?u32) void { + fn runFn(runnable: *Runnable, id: ?usize) void { const run_node: *RunQueue.Node = @fieldParentPtr("data", runnable); const closure: *@This() = @alignCast(@fieldParentPtr("run_node", run_node)); @call(.auto, func, .{id.?} ++ closure.arguments); @@ -191,7 +191,7 @@ pub fn spawnWgId(pool: *Pool, wait_group: *WaitGroup, comptime func: anytype, ar pool.mutex.lock(); const closure = pool.allocator.create(Closure) catch { - const id = pool.ids.getIndex(std.Thread.getCurrentId()); + const id: ?usize = pool.ids.getIndex(std.Thread.getCurrentId()); pool.mutex.unlock(); @call(.auto, func, .{id.?} ++ args); wait_group.finish(); @@ -258,7 +258,7 @@ fn worker(pool: *Pool) void { pool.mutex.lock(); defer pool.mutex.unlock(); - const id: ?u32 = if (pool.ids.count() > 0) @intCast(pool.ids.count()) else null; + const id: ?usize = if (pool.ids.count() > 0) @intCast(pool.ids.count()) else null; if (id) |_| pool.ids.putAssumeCapacityNoClobber(std.Thread.getCurrentId(), {}); while (true) { @@ -280,15 +280,12 @@ fn worker(pool: *Pool) void { } pub fn waitAndWork(pool: *Pool, wait_group: *WaitGroup) void { - var id: ?u32 = null; + var id: ?usize = null; while (!wait_group.isDone()) { pool.mutex.lock(); if (pool.run_queue.popFirst()) |run_node| { - id = id orelse if (pool.ids.getIndex(std.Thread.getCurrentId())) |index| - @intCast(index) - else - null; + id = id orelse pool.ids.getIndex(std.Thread.getCurrentId()); pool.mutex.unlock(); run_node.data.runFn(&run_node.data, id); continue; @@ -300,6 +297,6 @@ pub fn waitAndWork(pool: *Pool, wait_group: *WaitGroup) void { } } -pub fn getIdCount(pool: *Pool) u32 { +pub fn getIdCount(pool: *Pool) usize { return @intCast(1 + pool.threads.len); } diff --git a/src/Compilation.zig b/src/Compilation.zig index 74e8222bc3..118e325ed7 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -103,6 +103,14 @@ lld_errors: std.ArrayListUnmanaged(LldError) = .{}, work_queue: std.fifo.LinearFifo(Job, .Dynamic), +codegen_work: if (InternPool.single_threaded) void else struct { + mutex: std.Thread.Mutex, + cond: std.Thread.Condition, + queue: std.fifo.LinearFifo(CodegenJob, .Dynamic), + job_error: ?JobError, + done: bool, +}, + /// These jobs are to invoke the Clang compiler to create an object file, which /// gets linked with the Compilation. c_object_work_queue: std.fifo.LinearFifo(*CObject, .Dynamic), @@ -362,6 +370,16 @@ const Job = union(enum) { windows_import_lib: usize, }; +const CodegenJob = union(enum) { + decl: InternPool.DeclIndex, + func: struct { + func: InternPool.Index, + /// This `Air` is owned by the `Job` and allocated with `gpa`. + /// It must be deinited when the job is processed. + air: Air, + }, +}; + pub const CObject = struct { /// Relative to cwd. Owned by arena. 
src: CSourceFile, @@ -1429,6 +1447,13 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil .emit_llvm_ir = options.emit_llvm_ir, .emit_llvm_bc = options.emit_llvm_bc, .work_queue = std.fifo.LinearFifo(Job, .Dynamic).init(gpa), + .codegen_work = if (InternPool.single_threaded) {} else .{ + .mutex = .{}, + .cond = .{}, + .queue = std.fifo.LinearFifo(CodegenJob, .Dynamic).init(gpa), + .job_error = null, + .done = false, + }, .c_object_work_queue = std.fifo.LinearFifo(*CObject, .Dynamic).init(gpa), .win32_resource_work_queue = if (build_options.only_core_functionality) {} else std.fifo.LinearFifo(*Win32Resource, .Dynamic).init(gpa), .astgen_work_queue = std.fifo.LinearFifo(Zcu.File.Index, .Dynamic).init(gpa), @@ -3310,7 +3335,21 @@ pub fn addZirErrorMessages(eb: *ErrorBundle.Wip, file: *Zcu.File) !void { pub fn performAllTheWork( comp: *Compilation, main_progress_node: std.Progress.Node, -) error{ TimerUnsupported, OutOfMemory }!void { +) JobError!void { + defer if (comp.module) |mod| { + mod.sema_prog_node.end(); + mod.sema_prog_node = std.Progress.Node.none; + mod.codegen_prog_node.end(); + mod.codegen_prog_node = std.Progress.Node.none; + }; + try comp.performAllTheWorkInner(main_progress_node); + if (!InternPool.single_threaded) if (comp.codegen_work.job_error) |job_error| return job_error; +} + +fn performAllTheWorkInner( + comp: *Compilation, + main_progress_node: std.Progress.Node, +) JobError!void { // Here we queue up all the AstGen tasks first, followed by C object compilation. // We wait until the AstGen tasks are all completed before proceeding to the // (at least for now) single-threaded main work queue. However, C object compilation @@ -3410,16 +3449,20 @@ pub fn performAllTheWork( mod.sema_prog_node = main_progress_node.start("Semantic Analysis", 0); mod.codegen_prog_node = main_progress_node.start("Code Generation", 0); } - defer if (comp.module) |mod| { - mod.sema_prog_node.end(); - mod.sema_prog_node = undefined; - mod.codegen_prog_node.end(); - mod.codegen_prog_node = undefined; + + if (!InternPool.single_threaded) comp.thread_pool.spawnWgId(&comp.work_queue_wait_group, codegenThread, .{comp}); + defer if (!InternPool.single_threaded) { + { + comp.codegen_work.mutex.lock(); + defer comp.codegen_work.mutex.unlock(); + comp.codegen_work.done = true; + } + comp.codegen_work.cond.signal(); }; while (true) { if (comp.work_queue.readItem()) |work_item| { - try processOneJob(0, comp, work_item, main_progress_node); + try processOneJob(@intFromEnum(Zcu.PerThread.Id.main), comp, work_item, main_progress_node); continue; } if (comp.module) |zcu| { @@ -3447,11 +3490,12 @@ pub fn performAllTheWork( } } -fn processOneJob(tid: usize, comp: *Compilation, job: Job, prog_node: std.Progress.Node) !void { +const JobError = Allocator.Error; + +fn processOneJob(tid: usize, comp: *Compilation, job: Job, prog_node: std.Progress.Node) JobError!void { switch (job) { .codegen_decl => |decl_index| { - const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = @enumFromInt(tid) }; - const decl = pt.zcu.declPtr(decl_index); + const decl = comp.module.?.declPtr(decl_index); switch (decl.analysis) { .unreferenced => unreachable, @@ -3461,26 +3505,20 @@ fn processOneJob(tid: usize, comp: *Compilation, job: Job, prog_node: std.Progre .sema_failure, .codegen_failure, .dependency_failure, - => return, + => {}, .complete => { - const named_frame = tracy.namedFrame("codegen_decl"); - defer named_frame.end(); - assert(decl.has_tv); - - try pt.linkerUpdateDecl(decl_index); - return; 
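                // Editor's note (not part of the patch): this is the handoff
                // point. The `.codegen_decl` and `.codegen_func` jobs below
                // stop calling the linker directly and instead enqueue a
                // `CodegenJob`, which `codegenThread` (added further down)
                // drains via `processOneCodegenJob` on its own thread.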
+ try comp.queueCodegenJob(tid, .{ .decl = decl_index }); }, } }, .codegen_func => |func| { - const named_frame = tracy.namedFrame("codegen_func"); - defer named_frame.end(); - - const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = @enumFromInt(tid) }; // This call takes ownership of `func.air`. - try pt.linkerUpdateFunc(func.func, func.air); + try comp.queueCodegenJob(tid, .{ .func = .{ + .func = func.func, + .air = func.air, + } }); }, .analyze_func => |func| { const named_frame = tracy.namedFrame("analyze_func"); @@ -3772,6 +3810,61 @@ fn processOneJob(tid: usize, comp: *Compilation, job: Job, prog_node: std.Progre } } +fn queueCodegenJob(comp: *Compilation, tid: usize, codegen_job: CodegenJob) !void { + if (InternPool.single_threaded or + !comp.module.?.backendSupportsFeature(.separate_thread)) + return processOneCodegenJob(tid, comp, codegen_job); + + { + comp.codegen_work.mutex.lock(); + defer comp.codegen_work.mutex.unlock(); + try comp.codegen_work.queue.writeItem(codegen_job); + } + comp.codegen_work.cond.signal(); +} + +fn codegenThread(tid: usize, comp: *Compilation) void { + comp.codegen_work.mutex.lock(); + defer comp.codegen_work.mutex.unlock(); + + while (true) { + if (comp.codegen_work.queue.readItem()) |codegen_job| { + comp.codegen_work.mutex.unlock(); + defer comp.codegen_work.mutex.lock(); + + processOneCodegenJob(tid, comp, codegen_job) catch |job_error| { + comp.codegen_work.job_error = job_error; + break; + }; + continue; + } + + if (comp.codegen_work.done) break; + + comp.codegen_work.cond.wait(&comp.codegen_work.mutex); + } +} + +fn processOneCodegenJob(tid: usize, comp: *Compilation, codegen_job: CodegenJob) JobError!void { + switch (codegen_job) { + .decl => |decl_index| { + const named_frame = tracy.namedFrame("codegen_decl"); + defer named_frame.end(); + + const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = @enumFromInt(tid) }; + try pt.linkerUpdateDecl(decl_index); + }, + .func => |func| { + const named_frame = tracy.namedFrame("codegen_func"); + defer named_frame.end(); + + const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = @enumFromInt(tid) }; + // This call takes ownership of `func.air`. + try pt.linkerUpdateFunc(func.func, func.air); + }, + } +} + fn workerDocsCopy(comp: *Compilation) void { docsCopyFallible(comp) catch |err| { return comp.lockAndSetMiscFailure( diff --git a/src/Compilation/Config.zig b/src/Compilation/Config.zig index 2de2184252..6e28f5028c 100644 --- a/src/Compilation/Config.zig +++ b/src/Compilation/Config.zig @@ -440,12 +440,8 @@ pub fn resolve(options: Options) ResolveError!Config { }; }; - const backend_supports_error_tracing = target_util.backendSupportsFeature( - target.cpu.arch, - target.ofmt, - use_llvm, - .error_return_trace, - ); + const backend = target_util.zigBackend(target, use_llvm); + const backend_supports_error_tracing = target_util.backendSupportsFeature(backend, .error_return_trace); const root_error_tracing = b: { if (options.root_error_tracing) |x| break :b x; diff --git a/src/Zcu.zig b/src/Zcu.zig index b855e4fcf0..2f87bcca0f 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -64,8 +64,8 @@ root_mod: *Package.Module, /// `root_mod` is the test runner, and `main_mod` is the user's source file which has the tests. 
main_mod: *Package.Module, std_mod: *Package.Module, -sema_prog_node: std.Progress.Node = undefined, -codegen_prog_node: std.Progress.Node = undefined, +sema_prog_node: std.Progress.Node = std.Progress.Node.none, +codegen_prog_node: std.Progress.Node = std.Progress.Node.none, /// Used by AstGen worker to load and store ZIR cache. global_zir_cache: Compilation.Directory, @@ -3557,13 +3557,13 @@ pub const Feature = enum { /// to generate better machine code in the backends. All backends should migrate to /// enabling this feature. safety_checked_instructions, + /// If the backend supports running from another thread. + separate_thread, }; -pub fn backendSupportsFeature(zcu: Module, feature: Feature) bool { - const cpu_arch = zcu.root_mod.resolved_target.result.cpu.arch; - const ofmt = zcu.root_mod.resolved_target.result.ofmt; - const use_llvm = zcu.comp.config.use_llvm; - return target_util.backendSupportsFeature(cpu_arch, ofmt, use_llvm, feature); +pub fn backendSupportsFeature(zcu: Module, comptime feature: Feature) bool { + const backend = target_util.zigBackend(zcu.root_mod.resolved_target.result, zcu.comp.config.use_llvm); + return target_util.backendSupportsFeature(backend, feature); } pub const AtomicPtrAlignmentError = error{ diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index b0fc35b552..f8a3104dc0 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -2129,7 +2129,7 @@ pub fn populateTestFunctions( zcu.sema_prog_node = main_progress_node.start("Semantic Analysis", 0); defer { zcu.sema_prog_node.end(); - zcu.sema_prog_node = undefined; + zcu.sema_prog_node = std.Progress.Node.none; } try pt.ensureDeclAnalyzed(decl_index); } @@ -2238,7 +2238,7 @@ pub fn populateTestFunctions( zcu.codegen_prog_node = main_progress_node.start("Code Generation", 0); defer { zcu.codegen_prog_node.end(); - zcu.codegen_prog_node = undefined; + zcu.codegen_prog_node = std.Progress.Node.none; } try pt.linkerUpdateDecl(decl_index); diff --git a/src/target.zig b/src/target.zig index a253c1fa0b..2accc100b8 100644 --- a/src/target.zig +++ b/src/target.zig @@ -537,20 +537,42 @@ pub fn zigBackend(target: std.Target, use_llvm: bool) std.builtin.CompilerBacken }; } -pub fn backendSupportsFeature( - cpu_arch: std.Target.Cpu.Arch, - ofmt: std.Target.ObjectFormat, - use_llvm: bool, - feature: Feature, -) bool { +pub inline fn backendSupportsFeature(backend: std.builtin.CompilerBackend, comptime feature: Feature) bool { return switch (feature) { - .panic_fn => ofmt == .c or use_llvm or cpu_arch == .x86_64 or cpu_arch == .riscv64, - .panic_unwrap_error => ofmt == .c or use_llvm, - .safety_check_formatted => ofmt == .c or use_llvm, - .error_return_trace => use_llvm, - .is_named_enum_value => use_llvm, - .error_set_has_value => use_llvm or cpu_arch.isWasm(), - .field_reordering => ofmt == .c or use_llvm, - .safety_checked_instructions => use_llvm, + .panic_fn => switch (backend) { + .stage2_c, .stage2_llvm, .stage2_x86_64, .stage2_riscv64 => true, + else => false, + }, + .panic_unwrap_error => switch (backend) { + .stage2_c, .stage2_llvm => true, + else => false, + }, + .safety_check_formatted => switch (backend) { + .stage2_c, .stage2_llvm => true, + else => false, + }, + .error_return_trace => switch (backend) { + .stage2_llvm => true, + else => false, + }, + .is_named_enum_value => switch (backend) { + .stage2_llvm => true, + else => false, + }, + .error_set_has_value => switch (backend) { + .stage2_llvm, .stage2_wasm => true, + else => false, + }, + .field_reordering => switch (backend) { + 
.stage2_c, .stage2_llvm => true, + else => false, + }, + .safety_checked_instructions => switch (backend) { + .stage2_llvm => true, + else => false, + }, + .separate_thread => switch (backend) { + else => false, + }, }; } -- cgit v1.2.3
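Editor's appendix — a minimal, self-contained sketch of the producer/consumer
shape that `codegen_work` above introduces: a single consumer parked on a
condition variable, a FIFO drained with the mutex dropped around each job, and
a `done` flag for shutdown. The `Queue` wrapper and `Job` payloads are
illustrative names, not the compiler's API; this assumes a contemporary Zig
std with `std.fifo.LinearFifo`.

// codegen_queue_sketch.zig -- editor's illustration; names are hypothetical.
const std = @import("std");

const Job = union(enum) {
    decl: u32,
    func: u32,
};

const Queue = struct {
    mutex: std.Thread.Mutex = .{},
    cond: std.Thread.Condition = .{},
    fifo: std.fifo.LinearFifo(Job, .Dynamic),
    done: bool = false,

    // Mirrors queueCodegenJob: append under the lock, signal outside it.
    fn push(q: *Queue, job: Job) !void {
        {
            q.mutex.lock();
            defer q.mutex.unlock();
            try q.fifo.writeItem(job);
        }
        q.cond.signal();
    }

    // Mirrors the shutdown `defer` in performAllTheWorkInner: mark done
    // under the lock, then wake the consumer so it can observe the flag.
    fn finish(q: *Queue) void {
        {
            q.mutex.lock();
            defer q.mutex.unlock();
            q.done = true;
        }
        q.cond.signal();
    }

    // Mirrors codegenThread: hold the lock only while touching the FIFO;
    // drop it for the (potentially slow) per-job work.
    fn consume(q: *Queue) void {
        q.mutex.lock();
        defer q.mutex.unlock();
        while (true) {
            if (q.fifo.readItem()) |job| {
                q.mutex.unlock();
                defer q.mutex.lock();
                std.debug.print("processing {s}\n", .{@tagName(job)});
                continue;
            }
            if (q.done) break; // queue drained and producer finished
            q.cond.wait(&q.mutex); // releases the lock while parked
        }
    }
};

pub fn main() !void {
    var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa_state.deinit();
    const gpa = gpa_state.allocator();

    var q: Queue = .{ .fifo = std.fifo.LinearFifo(Job, .Dynamic).init(gpa) };
    defer q.fifo.deinit();

    const consumer = try std.Thread.spawn(.{}, Queue.consume, .{&q});
    try q.push(.{ .decl = 1 });
    try q.push(.{ .func = 2 });
    q.finish();
    consumer.join();
}

Two design points carried over from the patch: the condition variable is
signaled after the mutex is released, so the woken consumer does not
immediately block on a still-held lock; and because the consumer re-checks
the FIFO and the `done` flag under the lock before waiting, a signal sent
while it is busy processing cannot be lost.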