Diffstat (limited to 'src')
-rw-r--r--  src/Compilation.zig                887
-rw-r--r--  src/IncrementalDebugServer.zig     148
-rw-r--r--  src/InternPool.zig                 772
-rw-r--r--  src/Sema.zig                       673
-rw-r--r--  src/Sema/LowerZon.zig               75
-rw-r--r--  src/Type.zig                        34
-rw-r--r--  src/Value.zig                       27
-rw-r--r--  src/Zcu.zig                        213
-rw-r--r--  src/Zcu/PerThread.zig              298
-rw-r--r--  src/codegen/spirv/CodeGen.zig       15
-rw-r--r--  src/codegen/x86_64/CodeGen.zig      18
-rw-r--r--  src/libs/freebsd.zig                11
-rw-r--r--  src/libs/glibc.zig                  11
-rw-r--r--  src/libs/libcxx.zig                 10
-rw-r--r--  src/libs/libtsan.zig                 6
-rw-r--r--  src/libs/libunwind.zig               6
-rw-r--r--  src/libs/mingw.zig                   8
-rw-r--r--  src/libs/musl.zig                    8
-rw-r--r--  src/libs/netbsd.zig                 11
-rw-r--r--  src/link.zig                       157
-rw-r--r--  src/link/Elf.zig                     5
-rw-r--r--  src/link/MachO.zig                   4
-rw-r--r--  src/link/MachO/Atom.zig              5
-rw-r--r--  src/link/MachO/CodeSignature.zig     2
-rw-r--r--  src/link/MachO/InternalObject.zig    5
-rw-r--r--  src/link/MachO/file.zig              5
-rw-r--r--  src/link/MachO/hasher.zig           14
-rw-r--r--  src/link/MachO/relocatable.zig       1
-rw-r--r--  src/link/MachO/uuid.zig              3
-rw-r--r--  src/link/Queue.zig                 433
-rw-r--r--  src/link/Wasm.zig                    5
-rw-r--r--  src/main.zig                        70
-rw-r--r--  src/mutable_value.zig                5
33 files changed, 2239 insertions(+), 1706 deletions(-)
diff --git a/src/Compilation.zig b/src/Compilation.zig
index c40e025955..931a0b2d14 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -10,8 +10,6 @@ const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const log = std.log.scoped(.compilation);
const Target = std.Target;
-const ThreadPool = std.Thread.Pool;
-const WaitGroup = std.Thread.WaitGroup;
const ErrorBundle = std.zig.ErrorBundle;
const fatal = std.process.fatal;
@@ -56,6 +54,7 @@ gpa: Allocator,
/// threads at once.
arena: Allocator,
io: Io,
+thread_limit: usize,
/// Not every Compilation compiles .zig code! For example you could do `zig build-exe foo.o`.
zcu: ?*Zcu,
/// Contains different state depending on the `CacheMode` used by this `Compilation`.
@@ -110,7 +109,14 @@ win32_resource_table: if (dev.env.supports(.win32_resource)) std.AutoArrayHashMa
} = .{},
link_diags: link.Diags,
-link_task_queue: link.Queue = .empty,
+link_queue: link.Queue = .empty,
+
+/// This is populated during `Compilation.create` with a set of prelink tasks which need to be
+/// queued on the first update. In `update`, we will send these tasks to the linker, and clear
+/// them from this list.
+///
+/// Allocated into `gpa`.
+oneshot_prelink_tasks: std.ArrayList(link.PrelinkTask),
/// Set of work that can be represented by only flags to determine whether the
/// work is queued or not.
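For reference, the drain that consumes this list happens once, on the first `update`, inside the new `dispatchPrelinkWork` shown later in this diff. A sketch of that consumer, copied from the hunk below rather than describing new behavior:

    comp.queuePrelinkTasks(comp.oneshot_prelink_tasks.items) catch |err| switch (err) {
        error.Canceled => return,
    };
    comp.oneshot_prelink_tasks.clearRetainingCapacity();
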
@@ -198,7 +204,6 @@ libc_include_dir_list: []const []const u8,
libc_framework_dir_list: []const []const u8,
rc_includes: std.zig.RcIncludes,
mingw_unicode_entry_point: bool,
-thread_pool: *ThreadPool,
/// Populated when we build the libc++ static library. A Job to build this is placed in the queue
/// and resolved before calling linker.flush().
@@ -248,16 +253,10 @@ crt_files: std.StringHashMapUnmanaged(CrtFile) = .empty,
reference_trace: ?u32 = null,
/// This mutex guards all `Compilation` mutable state.
-/// Disabled in single-threaded mode because the thread pool spawns in the same thread.
-mutex: if (builtin.single_threaded) struct {
- pub inline fn tryLock(_: @This()) void {}
- pub inline fn lock(_: @This()) void {}
- pub inline fn unlock(_: @This()) void {}
-} else std.Thread.Mutex = .{},
+mutex: std.Io.Mutex = .init,
test_filters: []const []const u8,
-link_task_wait_group: WaitGroup = .{},
link_prog_node: std.Progress.Node = .none,
llvm_opt_bisect_limit: c_int,
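`std.Io.Mutex` threads the `Io` instance through every lock operation, which is why all the call sites below change shape. A minimal sketch of the two acquisition modes, assuming the API exactly as exercised in this diff:

    const io = comp.io;

    // Cancelable acquire: can fail with error.Canceled, which callers propagate.
    try whole.cache_manifest_mutex.lock(io);
    defer whole.cache_manifest_mutex.unlock(io);

    // Uncancelable acquire, for critical sections that must run even during
    // cancellation (e.g. recording an allocation failure under `comp.mutex`):
    comp.mutex.lockUncancelable(io);
    defer comp.mutex.unlock(io);
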
@@ -1568,7 +1567,7 @@ pub const CacheMode = enum {
pub const ParentWholeCache = struct {
manifest: *Cache.Manifest,
- mutex: *std.Thread.Mutex,
+ mutex: *std.Io.Mutex,
prefix_map: [4]u8,
};
@@ -1596,7 +1595,7 @@ const CacheUse = union(CacheMode) {
lf_open_opts: link.File.OpenOptions,
/// This is a pointer to a local variable inside `update`.
cache_manifest: ?*Cache.Manifest,
- cache_manifest_mutex: std.Thread.Mutex,
+ cache_manifest_mutex: std.Io.Mutex,
/// This is non-`null` for most of the body of `update`. It is the temporary directory which
/// we initially emit our artifacts to. After the main part of the update is done, it will
/// be closed and moved to its final location, and this field set to `null`.
@@ -1636,7 +1635,7 @@ const CacheUse = union(CacheMode) {
pub const CreateOptions = struct {
dirs: Directories,
- thread_pool: *ThreadPool,
+ thread_limit: usize,
self_exe_path: ?[]const u8 = null,
/// Options that have been resolved by calling `resolveDefaults`.
@@ -2211,8 +2210,9 @@ pub fn create(gpa: Allocator, arena: Allocator, io: Io, diag: *CreateDiagnostic,
.llvm_object = null,
.analysis_roots_buffer = undefined,
.analysis_roots_len = 0,
+ .codegen_task_pool = try .init(arena),
};
- try zcu.init(options.thread_pool.getIdCount());
+ try zcu.init(gpa, io, options.thread_limit);
break :blk zcu;
} else blk: {
if (options.emit_h != .no) return diag.fail(.emit_h_without_zcu);
@@ -2224,6 +2224,7 @@ pub fn create(gpa: Allocator, arena: Allocator, io: Io, diag: *CreateDiagnostic,
.gpa = gpa,
.arena = arena,
.io = io,
+ .thread_limit = options.thread_limit,
.zcu = opt_zcu,
.cache_use = undefined, // populated below
.bin_file = null, // populated below if necessary
@@ -2241,7 +2242,6 @@ pub fn create(gpa: Allocator, arena: Allocator, io: Io, diag: *CreateDiagnostic,
.libc_framework_dir_list = libc_dirs.libc_framework_dir_list,
.rc_includes = options.rc_includes,
.mingw_unicode_entry_point = options.mingw_unicode_entry_point,
- .thread_pool = options.thread_pool,
.clang_passthrough_mode = options.clang_passthrough_mode,
.clang_preprocessor_mode = options.clang_preprocessor_mode,
.verbose_cc = options.verbose_cc,
@@ -2282,7 +2282,8 @@ pub fn create(gpa: Allocator, arena: Allocator, io: Io, diag: *CreateDiagnostic,
.global_cc_argv = options.global_cc_argv,
.file_system_inputs = options.file_system_inputs,
.parent_whole_cache = options.parent_whole_cache,
- .link_diags = .init(gpa),
+ .link_diags = .init(gpa, io),
+ .oneshot_prelink_tasks = .empty,
.emit_bin = try options.emit_bin.resolve(arena, &options, .bin),
.emit_asm = try options.emit_asm.resolve(arena, &options, .@"asm"),
.emit_implib = try options.emit_implib.resolve(arena, &options, .implib),
@@ -2468,7 +2469,7 @@ pub fn create(gpa: Allocator, arena: Allocator, io: Io, diag: *CreateDiagnostic,
whole.* = .{
.lf_open_opts = lf_open_opts,
.cache_manifest = null,
- .cache_manifest_mutex = .{},
+ .cache_manifest_mutex = .init,
.tmp_artifact_directory = null,
.lock = null,
};
@@ -2553,14 +2554,14 @@ pub fn create(gpa: Allocator, arena: Allocator, io: Io, diag: *CreateDiagnostic,
};
const fields = @typeInfo(@TypeOf(paths)).@"struct".fields;
- try comp.link_task_queue.queued_prelink.ensureUnusedCapacity(gpa, fields.len + 1);
+ try comp.oneshot_prelink_tasks.ensureUnusedCapacity(gpa, fields.len + 1);
inline for (fields) |field| {
if (@field(paths, field.name)) |path| {
- comp.link_task_queue.queued_prelink.appendAssumeCapacity(.{ .load_object = path });
+ comp.oneshot_prelink_tasks.appendAssumeCapacity(.{ .load_object = path });
}
}
// Loads the libraries provided by `target_util.libcFullLinkFlags(target)`.
- comp.link_task_queue.queued_prelink.appendAssumeCapacity(.load_host_libc);
+ comp.oneshot_prelink_tasks.appendAssumeCapacity(.load_host_libc);
} else if (target.isMuslLibC()) {
if (!std.zig.target.canBuildLibC(target)) return diag.fail(.cross_libc_unavailable);
@@ -2629,10 +2630,9 @@ pub fn create(gpa: Allocator, arena: Allocator, io: Io, diag: *CreateDiagnostic,
for (0..count) |i| {
try comp.queueJob(.{ .windows_import_lib = i });
}
- // when integrating coff linker with prelink, the above
- // queueJob will need to change into something else since those
- // jobs are dispatched *after* the link_task_wait_group.wait()
- // that happens when separateCodegenThreadOk() is false.
+            // when integrating the coff linker with prelink, the above `queueJob` will need to move
+ // to something in `dispatchPrelinkWork`, which must queue all prelink link tasks
+ // *before* we begin working on the main job queue.
}
if (comp.wantBuildLibUnwindFromSource()) {
comp.queued_jobs.libunwind = true;
@@ -2681,19 +2681,15 @@ pub fn create(gpa: Allocator, arena: Allocator, io: Io, diag: *CreateDiagnostic,
}
}
- try comp.link_task_queue.queued_prelink.append(gpa, .load_explicitly_provided);
+ try comp.oneshot_prelink_tasks.append(gpa, .load_explicitly_provided);
}
- log.debug("queued prelink tasks: {d}", .{comp.link_task_queue.queued_prelink.items.len});
+ log.debug("queued oneshot prelink tasks: {d}", .{comp.oneshot_prelink_tasks.items.len});
return comp;
}
pub fn destroy(comp: *Compilation) void {
const gpa = comp.gpa;
- // This needs to be destroyed first, because it might contain MIR which we only know
- // how to interpret (which kind of MIR it is) from `comp.bin_file`.
- comp.link_task_queue.deinit(comp);
-
if (comp.bin_file) |lf| lf.destroy();
if (comp.zcu) |zcu| zcu.deinit();
comp.cache_use.deinit();
@@ -2760,6 +2756,7 @@ pub fn destroy(comp: *Compilation) void {
if (comp.time_report) |*tr| tr.deinit(gpa);
comp.link_diags.deinit();
+ comp.oneshot_prelink_tasks.deinit(gpa);
comp.clearMiscFailures();
@@ -2865,8 +2862,10 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) UpdateE
const tracy_trace = trace(@src());
defer tracy_trace.end();
- // This arena is scoped to this one update.
const gpa = comp.gpa;
+ const io = comp.io;
+
+ // This arena is scoped to this one update.
var arena_allocator = std.heap.ArenaAllocator.init(gpa);
defer arena_allocator.deinit();
const arena = arena_allocator.allocator();
@@ -2946,8 +2945,8 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) UpdateE
// In this case the cache hit contains the full set of file system inputs. Nice!
if (comp.file_system_inputs) |buf| try man.populateFileSystemInputs(buf);
if (comp.parent_whole_cache) |pwc| {
- pwc.mutex.lock();
- defer pwc.mutex.unlock();
+ try pwc.mutex.lock(io);
+ defer pwc.mutex.unlock(io);
try man.populateOtherManifest(pwc.manifest, pwc.prefix_map);
}
@@ -3066,7 +3065,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) UpdateE
comp.link_prog_node = .none;
};
- try comp.performAllTheWork(main_progress_node);
+ try comp.performAllTheWork(main_progress_node, arena);
if (comp.zcu) |zcu| {
const pt: Zcu.PerThread = .activate(zcu, .main);
@@ -3132,8 +3131,8 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) UpdateE
.whole => |whole| {
if (comp.file_system_inputs) |buf| try man.populateFileSystemInputs(buf);
if (comp.parent_whole_cache) |pwc| {
- pwc.mutex.lock();
- defer pwc.mutex.unlock();
+ try pwc.mutex.lock(io);
+ defer pwc.mutex.unlock(io);
try man.populateOtherManifest(pwc.manifest, pwc.prefix_map);
}
@@ -3234,6 +3233,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) UpdateE
/// Thread-safe. Assumes that `comp.mutex` is *not* already held by the caller.
pub fn appendFileSystemInput(comp: *Compilation, path: Compilation.Path) Allocator.Error!void {
const gpa = comp.gpa;
+ const io = comp.io;
const fsi = comp.file_system_inputs orelse return;
const prefixes = comp.cache_parent.prefixes();
@@ -3253,8 +3253,8 @@ pub fn appendFileSystemInput(comp: *Compilation, path: Compilation.Path) Allocat
);
// There may be concurrent calls to this function from C object workers and/or the main thread.
- comp.mutex.lock();
- defer comp.mutex.unlock();
+ comp.mutex.lockUncancelable(io);
+ defer comp.mutex.unlock(io);
try fsi.ensureUnusedCapacity(gpa, path.sub_path.len + 3);
if (fsi.items.len > 0) fsi.appendAssumeCapacity(0);
@@ -3305,6 +3305,7 @@ fn flush(
arena: Allocator,
tid: Zcu.PerThread.Id,
) Allocator.Error!void {
+ const io = comp.io;
if (comp.zcu) |zcu| {
if (zcu.llvm_object) |llvm_object| {
const pt: Zcu.PerThread = .activate(zcu, tid);
@@ -3317,8 +3318,8 @@ fn flush(
var timer = comp.startTimer();
defer if (timer.finish()) |ns| {
- comp.mutex.lock();
- defer comp.mutex.unlock();
+ comp.mutex.lockUncancelable(io);
+ defer comp.mutex.unlock(io);
comp.time_report.?.stats.real_ns_llvm_emit = ns;
};
@@ -3362,8 +3363,8 @@ fn flush(
if (comp.bin_file) |lf| {
var timer = comp.startTimer();
defer if (timer.finish()) |ns| {
- comp.mutex.lock();
- defer comp.mutex.unlock();
+ comp.mutex.lockUncancelable(io);
+ defer comp.mutex.unlock(io);
comp.time_report.?.stats.real_ns_link_flush = ns;
};
// This is needed before reading the error flags.
@@ -4575,44 +4576,277 @@ pub fn unableToLoadZcuFile(
fn performAllTheWork(
comp: *Compilation,
main_progress_node: std.Progress.Node,
+ update_arena: Allocator,
) JobError!void {
- // Regardless of errors, `comp.zcu` needs to update its generation number.
defer if (comp.zcu) |zcu| {
+ zcu.codegen_task_pool.cancel(zcu);
+ // Regardless of errors, `comp.zcu` needs to update its generation number.
zcu.generation += 1;
};
+ const io = comp.io;
+
// This is awkward: we don't want to start the timer until later, but we won't want to stop it
+    // until the wait groups finish. That means we need to do this.
var decl_work_timer: ?Timer = null;
defer commit_timer: {
const t = &(decl_work_timer orelse break :commit_timer);
const ns = t.finish() orelse break :commit_timer;
- comp.mutex.lock();
- defer comp.mutex.unlock();
+ comp.mutex.lockUncancelable(io);
+ defer comp.mutex.unlock(io);
comp.time_report.?.stats.real_ns_decls = ns;
}
- // Here we queue up all the AstGen tasks first, followed by C object compilation.
- // We wait until the AstGen tasks are all completed before proceeding to the
- // (at least for now) single-threaded main work queue. However, C object compilation
- // only needs to be finished by the end of this function.
-
- var work_queue_wait_group: WaitGroup = .{};
- defer work_queue_wait_group.wait();
+ var misc_group: Io.Group = .init;
+ defer misc_group.cancel(io);
- comp.link_task_wait_group.reset();
- defer comp.link_task_wait_group.wait();
+ try comp.link_queue.start(comp, update_arena);
+ defer comp.link_queue.cancel(io);
- // Already-queued prelink tasks
- comp.link_prog_node.increaseEstimatedTotalItems(comp.link_task_queue.queued_prelink.items.len);
- comp.link_task_queue.start(comp);
+ misc_group.concurrent(io, dispatchPrelinkWork, .{ comp, main_progress_node }) catch |err| switch (err) {
+ error.ConcurrencyUnavailable => {
+            // Run it immediately on this thread so that the link queue isn't blocked.
+ dispatchPrelinkWork(comp, main_progress_node);
+ },
+ };
if (comp.emit_docs != null) {
dev.check(.docs_emit);
- comp.thread_pool.spawnWg(&work_queue_wait_group, workerDocsCopy, .{comp});
- work_queue_wait_group.spawnManager(workerDocsWasm, .{ comp, main_progress_node });
+ misc_group.async(io, workerDocsCopy, .{comp});
+ misc_group.async(io, workerDocsWasm, .{ comp, main_progress_node });
+ }
+
+ if (comp.zcu) |zcu| {
+ const astgen_frame = tracy.namedFrame("astgen");
+ defer astgen_frame.end();
+
+ const zir_prog_node = main_progress_node.start("AST Lowering", 0);
+ defer zir_prog_node.end();
+
+ var timer = comp.startTimer();
+ defer if (timer.finish()) |ns| {
+ comp.mutex.lockUncancelable(io);
+ defer comp.mutex.unlock(io);
+ comp.time_report.?.stats.real_ns_files = ns;
+ };
+
+ const gpa = comp.gpa;
+
+ var astgen_group: Io.Group = .init;
+ defer astgen_group.cancel(io);
+
+ // We cannot reference `zcu.import_table` after we spawn any `workerUpdateFile` jobs,
+ // because on single-threaded targets the worker will be run eagerly, meaning the
+ // `import_table` could be mutated, and not even holding `comp.mutex` will save us. So,
+ // build up a list of the files to update *before* we spawn any jobs.
+ var astgen_work_items: std.MultiArrayList(struct {
+ file_index: Zcu.File.Index,
+ file: *Zcu.File,
+ }) = .empty;
+ defer astgen_work_items.deinit(gpa);
+ // Not every item in `import_table` will need updating, because some are builtin.zig
+ // files. However, most will, so let's just reserve sufficient capacity upfront.
+ try astgen_work_items.ensureTotalCapacity(gpa, zcu.import_table.count());
+ for (zcu.import_table.keys()) |file_index| {
+ const file = zcu.fileByIndex(file_index);
+ if (file.is_builtin) {
+ // This is a `builtin.zig`, so updating is redundant. However, we want to make
+                // sure the file contents are still correct on disk, since it improves the
+                // debugging experience. That job only needs `file`, so we can kick it
+ // off right now.
+ astgen_group.async(io, workerUpdateBuiltinFile, .{ comp, file });
+ continue;
+ }
+ astgen_work_items.appendAssumeCapacity(.{
+ .file_index = file_index,
+ .file = file,
+ });
+ }
+
+ // Now that we're not going to touch `zcu.import_table` again, we can spawn `workerUpdateFile` jobs.
+ for (astgen_work_items.items(.file_index), astgen_work_items.items(.file)) |file_index, file| {
+ astgen_group.async(io, workerUpdateFile, .{
+ comp, file, file_index, zir_prog_node, &astgen_group,
+ });
+ }
+
+ // On the other hand, it's fine to directly iterate `zcu.embed_table.keys()` here
+        // because `workerUpdateEmbedFile` can't invalidate it. The difference here is that one
+ // `@embedFile` can't trigger analysis of a new `@embedFile`!
+ for (0.., zcu.embed_table.keys()) |ef_index_usize, ef| {
+ const ef_index: Zcu.EmbedFile.Index = @enumFromInt(ef_index_usize);
+ astgen_group.async(io, workerUpdateEmbedFile, .{
+ comp, ef_index, ef,
+ });
+ }
+
+ astgen_group.wait(io);
+ }
+
+ if (comp.zcu) |zcu| {
+ const pt: Zcu.PerThread = .activate(zcu, .main);
+ defer pt.deactivate();
+
+ const gpa = zcu.gpa;
+
+ // On an incremental update, a source file might become "dead", in that all imports of
+ // the file were removed. This could even change what module the file belongs to! As such,
+ // we do a traversal over the files, to figure out which ones are alive and the modules
+ // they belong to.
+ const any_fatal_files = try pt.computeAliveFiles();
+
+ // If the cache mode is `whole`, add every alive source file to the manifest.
+ switch (comp.cache_use) {
+ .whole => |whole| if (whole.cache_manifest) |man| {
+ for (zcu.alive_files.keys()) |file_index| {
+ const file = zcu.fileByIndex(file_index);
+
+ switch (file.status) {
+ .never_loaded => unreachable, // AstGen tried to load it
+ .retryable_failure => continue, // the file cannot be read; this is a guaranteed error
+ .astgen_failure, .success => {}, // the file was read successfully
+ }
+
+ const path = try file.path.toAbsolute(comp.dirs, gpa);
+ defer gpa.free(path);
+
+ const result = res: {
+ try whole.cache_manifest_mutex.lock(io);
+ defer whole.cache_manifest_mutex.unlock(io);
+ if (file.source) |source| {
+ break :res man.addFilePostContents(path, source, file.stat);
+ } else {
+ break :res man.addFilePost(path);
+ }
+ };
+ result catch |err| switch (err) {
+ error.OutOfMemory => |e| return e,
+ else => {
+ try pt.reportRetryableFileError(file_index, "unable to update cache: {s}", .{@errorName(err)});
+ continue;
+ },
+ };
+ }
+ },
+ .none, .incremental => {},
+ }
+
+ if (any_fatal_files or
+ zcu.multi_module_err != null or
+ zcu.failed_imports.items.len > 0 or
+ comp.alloc_failure_occurred)
+ {
+ // We give up right now! No updating of ZIR refs, no nothing. The idea is that this prevents
+ // us from invalidating lots of incremental dependencies due to files with e.g. parse errors.
+ // However, this means our analysis data is invalid, so we want to omit all analysis errors.
+ zcu.skip_analysis_this_update = true;
+ // Since we're skipping analysis, there are no ZCU link tasks.
+ comp.link_queue.finishZcuQueue(comp);
+ // Let other compilation work finish to collect as many errors as possible.
+ misc_group.wait(io);
+ comp.link_queue.wait(io);
+ return;
+ }
+
+ if (comp.time_report) |*tr| {
+ tr.stats.n_reachable_files = @intCast(zcu.alive_files.count());
+ }
+
+ if (comp.config.incremental) {
+ const update_zir_refs_node = main_progress_node.start("Update ZIR References", 0);
+ defer update_zir_refs_node.end();
+ try pt.updateZirRefs();
+ }
+ try zcu.flushRetryableFailures();
+
+ // It's analysis time! Queue up our initial analysis.
+ for (zcu.analysisRoots()) |mod| {
+ try comp.queueJob(.{ .analyze_mod = mod });
+ }
+
+ zcu.sema_prog_node = main_progress_node.start("Semantic Analysis", 0);
+ if (comp.bin_file != null) {
+ zcu.codegen_prog_node = main_progress_node.start("Code Generation", 0);
+ }
+ // We increment `pending_codegen_jobs` so that it doesn't reach 0 until after analysis finishes.
+ // That prevents the "Code Generation" node from constantly disappearing and reappearing when
+ // we're probably going to analyze more functions at some point.
+ assert(zcu.pending_codegen_jobs.swap(1, .monotonic) == 0); // don't let this become 0 until analysis finishes
+ }
+ // When analysis ends, delete the progress nodes for "Semantic Analysis" and possibly "Code Generation".
+ defer if (comp.zcu) |zcu| {
+ zcu.sema_prog_node.end();
+ zcu.sema_prog_node = .none;
+ if (zcu.pending_codegen_jobs.fetchSub(1, .monotonic) == 1) {
+ // Decremented to 0, so all done.
+ zcu.codegen_prog_node.end();
+ zcu.codegen_prog_node = .none;
+ }
+ };
+
+ if (comp.zcu) |zcu| {
+ if (!zcu.backendSupportsFeature(.separate_thread)) {
+ // Close the ZCU task queue. Prelink may still be running, but the closed
+ // queue will cause the linker task to exit once prelink finishes. The
+ // closed queue also communicates to `enqueueZcu` that it should wait for
+ // the linker task to finish and then run ZCU tasks serially.
+ comp.link_queue.finishZcuQueue(comp);
+ }
+ }
+
+ if (comp.zcu != null) {
+ // Start the timer for the "decls" part of the pipeline (Sema, CodeGen, link).
+ decl_work_timer = comp.startTimer();
}
+ work: while (true) {
+ for (&comp.work_queues) |*work_queue| if (work_queue.popFront()) |job| {
+ try processOneJob(
+ @intFromEnum(Zcu.PerThread.Id.main),
+ comp,
+ job,
+ );
+ continue :work;
+ };
+ if (comp.zcu) |zcu| {
+ // If there's no work queued, check if there's anything outdated
+ // which we need to work on, and queue it if so.
+ if (try zcu.findOutdatedToAnalyze()) |outdated| {
+ try comp.queueJob(switch (outdated.unwrap()) {
+ .func => |f| .{ .analyze_func = f },
+ .memoized_state,
+ .@"comptime",
+ .nav_ty,
+ .nav_val,
+ .type,
+ => .{ .analyze_comptime_unit = outdated },
+ });
+ continue;
+ }
+ zcu.sema_prog_node.end();
+ zcu.sema_prog_node = .none;
+ }
+ break;
+ }
+
+ comp.link_queue.finishZcuQueue(comp);
+
+ // Main thread work is all done, now just wait for all async work.
+ misc_group.wait(io);
+ comp.link_queue.wait(io);
+}
+
+fn dispatchPrelinkWork(comp: *Compilation, main_progress_node: std.Progress.Node) void {
+ const io = comp.io;
+
+ var prelink_group: Io.Group = .init;
+ defer prelink_group.cancel(io);
+
+ comp.queuePrelinkTasks(comp.oneshot_prelink_tasks.items) catch |err| switch (err) {
+ error.Canceled => return,
+ };
+ comp.oneshot_prelink_tasks.clearRetainingCapacity();
+
// In case it failed last time, try again. `clearMiscFailures` was already
// called at the start of `update`.
if (comp.queued_jobs.compiler_rt_lib and comp.compiler_rt_lib == null) {
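The `Io.Group` used throughout the hunk above replaces the old `WaitGroup`/thread-pool pairing. A condensed sketch of the pattern, assuming the `std.Io.Group` API as exercised in this diff; `someWorker` is a hypothetical worker with the usual `(comp, progress_node)` signature:

    var group: Io.Group = .init;
    // On early return, cancel anything still in flight.
    defer group.cancel(io);

    // `async` may run the task concurrently, or eagerly on this thread
    // (e.g. on single-threaded targets).
    group.async(io, someWorker, .{ comp, main_progress_node });

    // `concurrent` requires a separate execution context, so callers must
    // handle the case where none is available by running the task inline.
    group.concurrent(io, someWorker, .{ comp, main_progress_node }) catch |err| switch (err) {
        error.ConcurrencyUnavailable => someWorker(comp, main_progress_node),
    };

    // Block until every spawned task has finished.
    group.wait(io);
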
@@ -4620,8 +4854,7 @@ fn performAllTheWork(
// compiler-rt due to LLD bugs as well, e.g.:
//
// https://github.com/llvm/llvm-project/issues/43698#issuecomment-2542660611
- comp.link_task_queue.startPrelinkItem();
- comp.link_task_wait_group.spawnManager(buildRt, .{
+ prelink_group.async(io, buildRt, .{
comp,
"compiler_rt.zig",
"compiler_rt",
@@ -4638,8 +4871,7 @@ fn performAllTheWork(
}
if (comp.queued_jobs.compiler_rt_obj and comp.compiler_rt_obj == null) {
- comp.link_task_queue.startPrelinkItem();
- comp.link_task_wait_group.spawnManager(buildRt, .{
+ prelink_group.async(io, buildRt, .{
comp,
"compiler_rt.zig",
"compiler_rt",
@@ -4657,8 +4889,7 @@ fn performAllTheWork(
// hack for stage2_x86_64 + coff
if (comp.queued_jobs.compiler_rt_dyn_lib and comp.compiler_rt_dyn_lib == null) {
- comp.link_task_queue.startPrelinkItem();
- comp.link_task_wait_group.spawnManager(buildRt, .{
+ prelink_group.async(io, buildRt, .{
comp,
"compiler_rt.zig",
"compiler_rt",
@@ -4675,8 +4906,7 @@ fn performAllTheWork(
}
if (comp.queued_jobs.fuzzer_lib and comp.fuzzer_lib == null) {
- comp.link_task_queue.startPrelinkItem();
- comp.link_task_wait_group.spawnManager(buildRt, .{
+ prelink_group.async(io, buildRt, .{
comp,
"fuzzer.zig",
"fuzzer",
@@ -4690,8 +4920,7 @@ fn performAllTheWork(
}
if (comp.queued_jobs.ubsan_rt_lib and comp.ubsan_rt_lib == null) {
- comp.link_task_queue.startPrelinkItem();
- comp.link_task_wait_group.spawnManager(buildRt, .{
+ prelink_group.async(io, buildRt, .{
comp,
"ubsan_rt.zig",
"ubsan_rt",
@@ -4707,8 +4936,7 @@ fn performAllTheWork(
}
if (comp.queued_jobs.ubsan_rt_obj and comp.ubsan_rt_obj == null) {
- comp.link_task_queue.startPrelinkItem();
- comp.link_task_wait_group.spawnManager(buildRt, .{
+ prelink_group.async(io, buildRt, .{
comp,
"ubsan_rt.zig",
"ubsan_rt",
@@ -4724,310 +4952,93 @@ fn performAllTheWork(
}
if (comp.queued_jobs.glibc_shared_objects) {
- comp.link_task_queue.startPrelinkItem();
- comp.link_task_wait_group.spawnManager(buildGlibcSharedObjects, .{ comp, main_progress_node });
+ prelink_group.async(io, buildGlibcSharedObjects, .{ comp, main_progress_node });
}
if (comp.queued_jobs.freebsd_shared_objects) {
- comp.link_task_queue.startPrelinkItem();
- comp.link_task_wait_group.spawnManager(buildFreeBSDSharedObjects, .{ comp, main_progress_node });
+ prelink_group.async(io, buildFreeBSDSharedObjects, .{ comp, main_progress_node });
}
if (comp.queued_jobs.netbsd_shared_objects) {
- comp.link_task_queue.startPrelinkItem();
- comp.link_task_wait_group.spawnManager(buildNetBSDSharedObjects, .{ comp, main_progress_node });
+ prelink_group.async(io, buildNetBSDSharedObjects, .{ comp, main_progress_node });
}
if (comp.queued_jobs.libunwind) {
- comp.link_task_queue.startPrelinkItem();
- comp.link_task_wait_group.spawnManager(buildLibUnwind, .{ comp, main_progress_node });
+ prelink_group.async(io, buildLibUnwind, .{ comp, main_progress_node });
}
if (comp.queued_jobs.libcxx) {
- comp.link_task_queue.startPrelinkItem();
- comp.link_task_wait_group.spawnManager(buildLibCxx, .{ comp, main_progress_node });
+ prelink_group.async(io, buildLibCxx, .{ comp, main_progress_node });
}
if (comp.queued_jobs.libcxxabi) {
- comp.link_task_queue.startPrelinkItem();
- comp.link_task_wait_group.spawnManager(buildLibCxxAbi, .{ comp, main_progress_node });
+ prelink_group.async(io, buildLibCxxAbi, .{ comp, main_progress_node });
}
if (comp.queued_jobs.libtsan) {
- comp.link_task_queue.startPrelinkItem();
- comp.link_task_wait_group.spawnManager(buildLibTsan, .{ comp, main_progress_node });
+ prelink_group.async(io, buildLibTsan, .{ comp, main_progress_node });
}
if (comp.queued_jobs.zigc_lib and comp.zigc_static_lib == null) {
- comp.link_task_queue.startPrelinkItem();
- comp.link_task_wait_group.spawnManager(buildLibZigC, .{ comp, main_progress_node });
+ prelink_group.async(io, buildLibZigC, .{ comp, main_progress_node });
}
for (0..@typeInfo(musl.CrtFile).@"enum".fields.len) |i| {
if (comp.queued_jobs.musl_crt_file[i]) {
const tag: musl.CrtFile = @enumFromInt(i);
- comp.link_task_queue.startPrelinkItem();
- comp.link_task_wait_group.spawnManager(buildMuslCrtFile, .{ comp, tag, main_progress_node });
+ prelink_group.async(io, buildMuslCrtFile, .{ comp, tag, main_progress_node });
}
}
for (0..@typeInfo(glibc.CrtFile).@"enum".fields.len) |i| {
if (comp.queued_jobs.glibc_crt_file[i]) {
const tag: glibc.CrtFile = @enumFromInt(i);
- comp.link_task_queue.startPrelinkItem();
- comp.link_task_wait_group.spawnManager(buildGlibcCrtFile, .{ comp, tag, main_progress_node });
+ prelink_group.async(io, buildGlibcCrtFile, .{ comp, tag, main_progress_node });
}
}
for (0..@typeInfo(freebsd.CrtFile).@"enum".fields.len) |i| {
if (comp.queued_jobs.freebsd_crt_file[i]) {
const tag: freebsd.CrtFile = @enumFromInt(i);
- comp.link_task_queue.startPrelinkItem();
- comp.link_task_wait_group.spawnManager(buildFreeBSDCrtFile, .{ comp, tag, main_progress_node });
+ prelink_group.async(io, buildFreeBSDCrtFile, .{ comp, tag, main_progress_node });
}
}
for (0..@typeInfo(netbsd.CrtFile).@"enum".fields.len) |i| {
if (comp.queued_jobs.netbsd_crt_file[i]) {
const tag: netbsd.CrtFile = @enumFromInt(i);
- comp.link_task_queue.startPrelinkItem();
- comp.link_task_wait_group.spawnManager(buildNetBSDCrtFile, .{ comp, tag, main_progress_node });
+ prelink_group.async(io, buildNetBSDCrtFile, .{ comp, tag, main_progress_node });
}
}
for (0..@typeInfo(wasi_libc.CrtFile).@"enum".fields.len) |i| {
if (comp.queued_jobs.wasi_libc_crt_file[i]) {
const tag: wasi_libc.CrtFile = @enumFromInt(i);
- comp.link_task_queue.startPrelinkItem();
- comp.link_task_wait_group.spawnManager(buildWasiLibcCrtFile, .{ comp, tag, main_progress_node });
+ prelink_group.async(io, buildWasiLibcCrtFile, .{ comp, tag, main_progress_node });
}
}
for (0..@typeInfo(mingw.CrtFile).@"enum".fields.len) |i| {
if (comp.queued_jobs.mingw_crt_file[i]) {
const tag: mingw.CrtFile = @enumFromInt(i);
- comp.link_task_queue.startPrelinkItem();
- comp.link_task_wait_group.spawnManager(buildMingwCrtFile, .{ comp, tag, main_progress_node });
- }
- }
-
- {
- const astgen_frame = tracy.namedFrame("astgen");
- defer astgen_frame.end();
-
- const zir_prog_node = main_progress_node.start("AST Lowering", 0);
- defer zir_prog_node.end();
-
- var timer = comp.startTimer();
- defer if (timer.finish()) |ns| {
- comp.mutex.lock();
- defer comp.mutex.unlock();
- comp.time_report.?.stats.real_ns_files = ns;
- };
-
- var astgen_wait_group: WaitGroup = .{};
- defer astgen_wait_group.wait();
-
- if (comp.zcu) |zcu| {
- const gpa = zcu.gpa;
-
- // We cannot reference `zcu.import_table` after we spawn any `workerUpdateFile` jobs,
- // because on single-threaded targets the worker will be run eagerly, meaning the
- // `import_table` could be mutated, and not even holding `comp.mutex` will save us. So,
- // build up a list of the files to update *before* we spawn any jobs.
- var astgen_work_items: std.MultiArrayList(struct {
- file_index: Zcu.File.Index,
- file: *Zcu.File,
- }) = .empty;
- defer astgen_work_items.deinit(gpa);
- // Not every item in `import_table` will need updating, because some are builtin.zig
- // files. However, most will, so let's just reserve sufficient capacity upfront.
- try astgen_work_items.ensureTotalCapacity(gpa, zcu.import_table.count());
- for (zcu.import_table.keys()) |file_index| {
- const file = zcu.fileByIndex(file_index);
- if (file.is_builtin) {
- // This is a `builtin.zig`, so updating is redundant. However, we want to make
- // sure the file contents are still correct on disk, since it can improve the
- // debugging experience better. That job only needs `file`, so we can kick it
- // off right now.
- comp.thread_pool.spawnWg(&astgen_wait_group, workerUpdateBuiltinFile, .{ comp, file });
- continue;
- }
- astgen_work_items.appendAssumeCapacity(.{
- .file_index = file_index,
- .file = file,
- });
- }
-
- // Now that we're not going to touch `zcu.import_table` again, we can spawn `workerUpdateFile` jobs.
- for (astgen_work_items.items(.file_index), astgen_work_items.items(.file)) |file_index, file| {
- comp.thread_pool.spawnWgId(&astgen_wait_group, workerUpdateFile, .{
- comp, file, file_index, zir_prog_node, &astgen_wait_group,
- });
- }
-
- // On the other hand, it's fine to directly iterate `zcu.embed_table.keys()` here
- // because `workerUpdateEmbedFile` can't invalidate it. The different here is that one
- // `@embedFile` can't trigger analysis of a new `@embedFile`!
- for (0.., zcu.embed_table.keys()) |ef_index_usize, ef| {
- const ef_index: Zcu.EmbedFile.Index = @enumFromInt(ef_index_usize);
- comp.thread_pool.spawnWgId(&astgen_wait_group, workerUpdateEmbedFile, .{
- comp, ef_index, ef,
- });
- }
- }
-
- while (comp.c_object_work_queue.popFront()) |c_object| {
- comp.link_task_queue.startPrelinkItem();
- comp.thread_pool.spawnWg(&comp.link_task_wait_group, workerUpdateCObject, .{
- comp, c_object, main_progress_node,
- });
- }
-
- while (comp.win32_resource_work_queue.popFront()) |win32_resource| {
- comp.link_task_queue.startPrelinkItem();
- comp.thread_pool.spawnWg(&comp.link_task_wait_group, workerUpdateWin32Resource, .{
- comp, win32_resource, main_progress_node,
- });
- }
- }
-
- if (comp.zcu) |zcu| {
- const pt: Zcu.PerThread = .activate(zcu, .main);
- defer pt.deactivate();
-
- const gpa = zcu.gpa;
-
- // On an incremental update, a source file might become "dead", in that all imports of
- // the file were removed. This could even change what module the file belongs to! As such,
- // we do a traversal over the files, to figure out which ones are alive and the modules
- // they belong to.
- const any_fatal_files = try pt.computeAliveFiles();
-
- // If the cache mode is `whole`, add every alive source file to the manifest.
- switch (comp.cache_use) {
- .whole => |whole| if (whole.cache_manifest) |man| {
- for (zcu.alive_files.keys()) |file_index| {
- const file = zcu.fileByIndex(file_index);
-
- switch (file.status) {
- .never_loaded => unreachable, // AstGen tried to load it
- .retryable_failure => continue, // the file cannot be read; this is a guaranteed error
- .astgen_failure, .success => {}, // the file was read successfully
- }
-
- const path = try file.path.toAbsolute(comp.dirs, gpa);
- defer gpa.free(path);
-
- const result = res: {
- whole.cache_manifest_mutex.lock();
- defer whole.cache_manifest_mutex.unlock();
- if (file.source) |source| {
- break :res man.addFilePostContents(path, source, file.stat);
- } else {
- break :res man.addFilePost(path);
- }
- };
- result catch |err| switch (err) {
- error.OutOfMemory => |e| return e,
- else => {
- try pt.reportRetryableFileError(file_index, "unable to update cache: {s}", .{@errorName(err)});
- continue;
- },
- };
- }
- },
- .none, .incremental => {},
- }
-
- if (any_fatal_files or
- zcu.multi_module_err != null or
- zcu.failed_imports.items.len > 0 or
- comp.alloc_failure_occurred)
- {
- // We give up right now! No updating of ZIR refs, no nothing. The idea is that this prevents
- // us from invalidating lots of incremental dependencies due to files with e.g. parse errors.
- // However, this means our analysis data is invalid, so we want to omit all analysis errors.
- zcu.skip_analysis_this_update = true;
- return;
- }
-
- if (comp.time_report) |*tr| {
- tr.stats.n_reachable_files = @intCast(zcu.alive_files.count());
- }
-
- if (comp.config.incremental) {
- const update_zir_refs_node = main_progress_node.start("Update ZIR References", 0);
- defer update_zir_refs_node.end();
- try pt.updateZirRefs();
- }
- try zcu.flushRetryableFailures();
-
- // It's analysis time! Queue up our initial analysis.
- for (zcu.analysisRoots()) |mod| {
- try comp.queueJob(.{ .analyze_mod = mod });
- }
-
- zcu.sema_prog_node = main_progress_node.start("Semantic Analysis", 0);
- if (comp.bin_file != null) {
- zcu.codegen_prog_node = main_progress_node.start("Code Generation", 0);
+ prelink_group.async(io, buildMingwCrtFile, .{ comp, tag, main_progress_node });
}
- // We increment `pending_codegen_jobs` so that it doesn't reach 0 until after analysis finishes.
- // That prevents the "Code Generation" node from constantly disappearing and reappearing when
- // we're probably going to analyze more functions at some point.
- assert(zcu.pending_codegen_jobs.swap(1, .monotonic) == 0); // don't let this become 0 until analysis finishes
}
- // When analysis ends, delete the progress nodes for "Semantic Analysis" and possibly "Code Generation".
- defer if (comp.zcu) |zcu| {
- zcu.sema_prog_node.end();
- zcu.sema_prog_node = .none;
- if (zcu.pending_codegen_jobs.rmw(.Sub, 1, .monotonic) == 1) {
- // Decremented to 0, so all done.
- zcu.codegen_prog_node.end();
- zcu.codegen_prog_node = .none;
- }
- };
-
- // We aren't going to queue any more prelink tasks.
- comp.link_task_queue.finishPrelinkItem(comp);
- if (!comp.separateCodegenThreadOk()) {
- // Waits until all input files have been parsed.
- comp.link_task_wait_group.wait();
- comp.link_task_wait_group.reset();
- std.log.scoped(.link).debug("finished waiting for link_task_wait_group", .{});
+ while (comp.c_object_work_queue.popFront()) |c_object| {
+ prelink_group.async(io, workerUpdateCObject, .{
+ comp, c_object, main_progress_node,
+ });
}
- if (comp.zcu != null) {
- // Start the timer for the "decls" part of the pipeline (Sema, CodeGen, link).
- decl_work_timer = comp.startTimer();
+ while (comp.win32_resource_work_queue.popFront()) |win32_resource| {
+ prelink_group.async(io, workerUpdateWin32Resource, .{
+ comp, win32_resource, main_progress_node,
+ });
}
- work: while (true) {
- for (&comp.work_queues) |*work_queue| if (work_queue.popFront()) |job| {
- try processOneJob(@intFromEnum(Zcu.PerThread.Id.main), comp, job);
- continue :work;
- };
- if (comp.zcu) |zcu| {
- // If there's no work queued, check if there's anything outdated
- // which we need to work on, and queue it if so.
- if (try zcu.findOutdatedToAnalyze()) |outdated| {
- try comp.queueJob(switch (outdated.unwrap()) {
- .func => |f| .{ .analyze_func = f },
- .memoized_state,
- .@"comptime",
- .nav_ty,
- .nav_val,
- .type,
- => .{ .analyze_comptime_unit = outdated },
- });
- continue;
- }
- zcu.sema_prog_node.end();
- zcu.sema_prog_node = .none;
- }
- break;
- }
+ prelink_group.wait(io);
+ comp.link_queue.finishPrelinkQueue(comp);
}
const JobError = Allocator.Error || Io.Cancelable;
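Taken together, the hunks above reduce prelink dispatch to the following outline (a summary of the code in this diff, not additional behavior). The ordering is the important invariant: `finishPrelinkQueue` must not run until every task that can enqueue prelink work has completed:

    fn dispatchPrelinkWork(comp: *Compilation, main_progress_node: std.Progress.Node) void {
        const io = comp.io;
        var prelink_group: Io.Group = .init;
        defer prelink_group.cancel(io);

        // 1. Drain the one-shot tasks collected during `Compilation.create`.
        // 2. Spawn the compiler-rt/libc/CRT/lib* builders onto `prelink_group`.
        // 3. Drain the C-object and win32 resource work queues onto the group.

        // 4. Wait for all of the above, then close the prelink queue.
        prelink_group.wait(io);
        comp.link_queue.finishPrelinkQueue(comp);
    }
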
@@ -5040,58 +5051,38 @@ pub fn queueJobs(comp: *Compilation, jobs: []const Job) !void {
for (jobs) |job| try comp.queueJob(job);
}
-fn processOneJob(tid: usize, comp: *Compilation, job: Job) JobError!void {
+fn processOneJob(
+ tid: usize,
+ comp: *Compilation,
+ job: Job,
+) JobError!void {
switch (job) {
.codegen_func => |func| {
const zcu = comp.zcu.?;
const gpa = zcu.gpa;
- var air = func.air;
- errdefer {
- zcu.codegen_prog_node.completeOne();
- comp.link_prog_node.completeOne();
- air.deinit(gpa);
- }
- if (!air.typesFullyResolved(zcu)) {
+ var owned_air: ?Air = func.air;
+ defer if (owned_air) |*air| air.deinit(gpa);
+
+ if (!owned_air.?.typesFullyResolved(zcu)) {
// Type resolution failed in a way which affects this function. This is a transitive
// failure, but it doesn't need recording, because this function semantically depends
// on the failed type, so when it is changed the function is updated.
zcu.codegen_prog_node.completeOne();
comp.link_prog_node.completeOne();
- air.deinit(gpa);
return;
}
- const shared_mir = try gpa.create(link.ZcuTask.LinkFunc.SharedMir);
- shared_mir.* = .{
- .status = .init(.pending),
- .value = undefined,
- };
- assert(zcu.pending_codegen_jobs.rmw(.Add, 1, .monotonic) > 0); // the "Code Generation" node hasn't been ended
- // This value is used as a heuristic to avoid queueing too much AIR/MIR at once (hence
- // using a lot of memory). If this would cause too many AIR bytes to be in-flight, we
- // will block on the `dispatchZcuLinkTask` call below.
- const air_bytes: u32 = @intCast(air.instructions.len * 5 + air.extra.items.len * 4);
- if (comp.separateCodegenThreadOk()) {
- // `workerZcuCodegen` takes ownership of `air`.
- comp.thread_pool.spawnWgId(&comp.link_task_wait_group, workerZcuCodegen, .{ comp, func.func, air, shared_mir });
- comp.dispatchZcuLinkTask(tid, .{ .link_func = .{
- .func = func.func,
- .mir = shared_mir,
- .air_bytes = air_bytes,
- } });
- } else {
- {
- const pt: Zcu.PerThread = .activate(comp.zcu.?, @enumFromInt(tid));
- defer pt.deactivate();
- pt.runCodegen(func.func, &air, shared_mir);
- }
- assert(shared_mir.status.load(.monotonic) != .pending);
- comp.dispatchZcuLinkTask(tid, .{ .link_func = .{
- .func = func.func,
- .mir = shared_mir,
- .air_bytes = air_bytes,
- } });
- air.deinit(gpa);
- }
+
+ // Some linkers need to refer to the AIR. In that case, the linker is not running
+ // concurrently, so we'll just keep ownership of the AIR for ourselves instead of
+ // letting the codegen job destroy it.
+ const disown_air = zcu.backendSupportsFeature(.separate_thread);
+
+ // Begin the codegen task. If the codegen/link queue is backed up, this might
+ // block until the linker is able to process some tasks.
+ const codegen_task = try zcu.codegen_task_pool.start(zcu, func.func, &owned_air.?, disown_air);
+ if (disown_air) owned_air = null;
+
+ try comp.link_queue.enqueueZcu(comp, tid, .{ .link_func = codegen_task });
},
.link_nav => |nav_index| {
const zcu = comp.zcu.?;
@@ -5111,7 +5102,7 @@ fn processOneJob(tid: usize, comp: *Compilation, job: Job) JobError!void {
comp.link_prog_node.completeOne();
return;
}
- comp.dispatchZcuLinkTask(tid, .{ .link_nav = nav_index });
+ try comp.link_queue.enqueueZcu(comp, tid, .{ .link_nav = nav_index });
},
.link_type => |ty| {
const zcu = comp.zcu.?;
@@ -5123,10 +5114,10 @@ fn processOneJob(tid: usize, comp: *Compilation, job: Job) JobError!void {
comp.link_prog_node.completeOne();
return;
}
- comp.dispatchZcuLinkTask(tid, .{ .link_type = ty });
+ try comp.link_queue.enqueueZcu(comp, tid, .{ .link_type = ty });
},
- .update_line_number => |ti| {
- comp.dispatchZcuLinkTask(tid, .{ .update_line_number = ti });
+ .update_line_number => |tracked_inst| {
+ try comp.link_queue.enqueueZcu(comp, tid, .{ .update_line_number = tracked_inst });
},
.analyze_func => |func| {
const named_frame = tracy.namedFrame("analyze_func");
@@ -5220,12 +5211,6 @@ fn processOneJob(tid: usize, comp: *Compilation, job: Job) JobError!void {
}
}
-pub fn separateCodegenThreadOk(comp: *const Compilation) bool {
- if (InternPool.single_threaded) return false;
- const zcu = comp.zcu orelse return true;
- return zcu.backendSupportsFeature(.separate_thread);
-}
-
fn createDepFile(
comp: *Compilation,
depfile: []const u8,
@@ -5480,6 +5465,7 @@ fn workerDocsWasmFallible(comp: *Compilation, prog_node: std.Progress.Node) SubU
var sub_create_diag: CreateDiagnostic = undefined;
const sub_compilation = Compilation.create(gpa, arena, io, &sub_create_diag, .{
+ .thread_limit = comp.thread_limit,
.dirs = dirs,
.self_exe_path = comp.self_exe_path,
.config = config,
@@ -5487,7 +5473,6 @@ fn workerDocsWasmFallible(comp: *Compilation, prog_node: std.Progress.Node) SubU
.entry = .disabled,
.cache_mode = .whole,
.root_name = root_name,
- .thread_pool = comp.thread_pool,
.libc_installation = comp.libc_installation,
.emit_bin = .yes_cache,
.verbose_cc = comp.verbose_cc,
@@ -5541,13 +5526,15 @@ fn workerDocsWasmFallible(comp: *Compilation, prog_node: std.Progress.Node) SubU
}
fn workerUpdateFile(
- tid: usize,
comp: *Compilation,
file: *Zcu.File,
file_index: Zcu.File.Index,
prog_node: std.Progress.Node,
- wg: *WaitGroup,
+ group: *Io.Group,
) void {
+ const tid = Compilation.getTid();
+ const io = comp.io;
+
const child_prog_node = prog_node.start(fs.path.basename(file.path.sub_path), 0);
defer child_prog_node.end();
@@ -5556,8 +5543,8 @@ fn workerUpdateFile(
pt.updateFile(file_index, file) catch |err| {
pt.reportRetryableFileError(file_index, "unable to load '{s}': {s}", .{ fs.path.basename(file.path.sub_path), @errorName(err) }) catch |oom| switch (oom) {
error.OutOfMemory => {
- comp.mutex.lock();
- defer comp.mutex.unlock();
+ comp.mutex.lockUncancelable(io);
+ defer comp.mutex.unlock(io);
comp.setAllocFailure();
},
};
@@ -5587,14 +5574,14 @@ fn workerUpdateFile(
if (pt.discoverImport(file.path, import_path)) |res| switch (res) {
.module, .existing_file => {},
.new_file => |new| {
- comp.thread_pool.spawnWgId(wg, workerUpdateFile, .{
- comp, new.file, new.index, prog_node, wg,
+ group.async(io, workerUpdateFile, .{
+ comp, new.file, new.index, prog_node, group,
});
},
} else |err| switch (err) {
error.OutOfMemory => {
- comp.mutex.lock();
- defer comp.mutex.unlock();
+ comp.mutex.lockUncancelable(io);
+ defer comp.mutex.unlock(io);
comp.setAllocFailure();
},
}
@@ -5610,17 +5597,20 @@ fn workerUpdateBuiltinFile(comp: *Compilation, file: *Zcu.File) void {
);
}
-fn workerUpdateEmbedFile(tid: usize, comp: *Compilation, ef_index: Zcu.EmbedFile.Index, ef: *Zcu.EmbedFile) void {
+fn workerUpdateEmbedFile(comp: *Compilation, ef_index: Zcu.EmbedFile.Index, ef: *Zcu.EmbedFile) void {
+ const tid = Compilation.getTid();
+ const io = comp.io;
comp.detectEmbedFileUpdate(@enumFromInt(tid), ef_index, ef) catch |err| switch (err) {
error.OutOfMemory => {
- comp.mutex.lock();
- defer comp.mutex.unlock();
+ comp.mutex.lockUncancelable(io);
+ defer comp.mutex.unlock(io);
comp.setAllocFailure();
},
};
}
fn detectEmbedFileUpdate(comp: *Compilation, tid: Zcu.PerThread.Id, ef_index: Zcu.EmbedFile.Index, ef: *Zcu.EmbedFile) !void {
+ const io = comp.io;
const zcu = comp.zcu.?;
const pt: Zcu.PerThread = .activate(zcu, tid);
defer pt.deactivate();
@@ -5633,8 +5623,8 @@ fn detectEmbedFileUpdate(comp: *Compilation, tid: Zcu.PerThread.Id, ef_index: Zc
if (ef.val != .none and ef.val == old_val) return; // success, value unchanged
if (ef.val == .none and old_val == .none and ef.err == old_err) return; // failure, error unchanged
- comp.mutex.lock();
- defer comp.mutex.unlock();
+ comp.mutex.lockUncancelable(io);
+ defer comp.mutex.unlock(io);
try zcu.markDependeeOutdated(.not_marked_po, .{ .embed_file = ef_index });
}
@@ -5777,8 +5767,8 @@ pub fn translateC(
switch (comp.cache_use) {
.whole => |whole| if (whole.cache_manifest) |whole_cache_manifest| {
- whole.cache_manifest_mutex.lock();
- defer whole.cache_manifest_mutex.unlock();
+ try whole.cache_manifest_mutex.lock(io);
+ defer whole.cache_manifest_mutex.unlock(io);
try whole_cache_manifest.addDepFilePost(cache_tmp_dir, dep_basename);
},
.incremental, .none => {},
@@ -5879,7 +5869,6 @@ fn workerUpdateCObject(
c_object: *CObject,
progress_node: std.Progress.Node,
) void {
- defer comp.link_task_queue.finishPrelinkItem(comp);
comp.updateCObject(c_object, progress_node) catch |err| switch (err) {
error.AnalysisFail => return,
else => {
@@ -5897,7 +5886,6 @@ fn workerUpdateWin32Resource(
win32_resource: *Win32Resource,
progress_node: std.Progress.Node,
) void {
- defer comp.link_task_queue.finishPrelinkItem(comp);
comp.updateWin32Resource(win32_resource, progress_node) catch |err| switch (err) {
error.AnalysisFail => return,
else => {
@@ -5915,21 +5903,6 @@ pub const RtOptions = struct {
allow_lto: bool = true,
};
-fn workerZcuCodegen(
- tid: usize,
- comp: *Compilation,
- func_index: InternPool.Index,
- orig_air: Air,
- out: *link.ZcuTask.LinkFunc.SharedMir,
-) void {
- var air = orig_air;
- // We own `air` now, so we are responsbile for freeing it.
- defer air.deinit(comp.gpa);
- const pt: Zcu.PerThread = .activate(comp.zcu.?, @enumFromInt(tid));
- defer pt.deactivate();
- pt.runCodegen(func_index, &air, out);
-}
-
fn buildRt(
comp: *Compilation,
root_source_name: []const u8,
@@ -5941,7 +5914,6 @@ fn buildRt(
options: RtOptions,
out: *?CrtFile,
) void {
- defer comp.link_task_queue.finishPrelinkItem(comp);
comp.buildOutputFromZig(
root_source_name,
root_name,
@@ -5960,7 +5932,6 @@ fn buildRt(
}
fn buildMuslCrtFile(comp: *Compilation, crt_file: musl.CrtFile, prog_node: std.Progress.Node) void {
- defer comp.link_task_queue.finishPrelinkItem(comp);
if (musl.buildCrtFile(comp, crt_file, prog_node)) |_| {
comp.queued_jobs.musl_crt_file[@intFromEnum(crt_file)] = false;
} else |err| switch (err) {
@@ -5972,7 +5943,6 @@ fn buildMuslCrtFile(comp: *Compilation, crt_file: musl.CrtFile, prog_node: std.P
}
fn buildGlibcCrtFile(comp: *Compilation, crt_file: glibc.CrtFile, prog_node: std.Progress.Node) void {
- defer comp.link_task_queue.finishPrelinkItem(comp);
if (glibc.buildCrtFile(comp, crt_file, prog_node)) |_| {
comp.queued_jobs.glibc_crt_file[@intFromEnum(crt_file)] = false;
} else |err| switch (err) {
@@ -5984,7 +5954,6 @@ fn buildGlibcCrtFile(comp: *Compilation, crt_file: glibc.CrtFile, prog_node: std
}
fn buildGlibcSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) void {
- defer comp.link_task_queue.finishPrelinkItem(comp);
if (glibc.buildSharedObjects(comp, prog_node)) |_| {
// The job should no longer be queued up since it succeeded.
comp.queued_jobs.glibc_shared_objects = false;
@@ -5995,7 +5964,6 @@ fn buildGlibcSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) voi
}
fn buildFreeBSDCrtFile(comp: *Compilation, crt_file: freebsd.CrtFile, prog_node: std.Progress.Node) void {
- defer comp.link_task_queue.finishPrelinkItem(comp);
if (freebsd.buildCrtFile(comp, crt_file, prog_node)) |_| {
comp.queued_jobs.freebsd_crt_file[@intFromEnum(crt_file)] = false;
} else |err| switch (err) {
@@ -6007,7 +5975,6 @@ fn buildFreeBSDCrtFile(comp: *Compilation, crt_file: freebsd.CrtFile, prog_node:
}
fn buildFreeBSDSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) void {
- defer comp.link_task_queue.finishPrelinkItem(comp);
if (freebsd.buildSharedObjects(comp, prog_node)) |_| {
// The job should no longer be queued up since it succeeded.
comp.queued_jobs.freebsd_shared_objects = false;
@@ -6020,7 +5987,6 @@ fn buildFreeBSDSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) v
}
fn buildNetBSDCrtFile(comp: *Compilation, crt_file: netbsd.CrtFile, prog_node: std.Progress.Node) void {
- defer comp.link_task_queue.finishPrelinkItem(comp);
if (netbsd.buildCrtFile(comp, crt_file, prog_node)) |_| {
comp.queued_jobs.netbsd_crt_file[@intFromEnum(crt_file)] = false;
} else |err| switch (err) {
@@ -6032,7 +5998,6 @@ fn buildNetBSDCrtFile(comp: *Compilation, crt_file: netbsd.CrtFile, prog_node: s
}
fn buildNetBSDSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) void {
- defer comp.link_task_queue.finishPrelinkItem(comp);
if (netbsd.buildSharedObjects(comp, prog_node)) |_| {
// The job should no longer be queued up since it succeeded.
comp.queued_jobs.netbsd_shared_objects = false;
@@ -6045,7 +6010,6 @@ fn buildNetBSDSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) vo
}
fn buildMingwCrtFile(comp: *Compilation, crt_file: mingw.CrtFile, prog_node: std.Progress.Node) void {
- defer comp.link_task_queue.finishPrelinkItem(comp);
if (mingw.buildCrtFile(comp, crt_file, prog_node)) |_| {
comp.queued_jobs.mingw_crt_file[@intFromEnum(crt_file)] = false;
} else |err| switch (err) {
@@ -6057,7 +6021,6 @@ fn buildMingwCrtFile(comp: *Compilation, crt_file: mingw.CrtFile, prog_node: std
}
fn buildWasiLibcCrtFile(comp: *Compilation, crt_file: wasi_libc.CrtFile, prog_node: std.Progress.Node) void {
- defer comp.link_task_queue.finishPrelinkItem(comp);
if (wasi_libc.buildCrtFile(comp, crt_file, prog_node)) |_| {
comp.queued_jobs.wasi_libc_crt_file[@intFromEnum(crt_file)] = false;
} else |err| switch (err) {
@@ -6069,7 +6032,6 @@ fn buildWasiLibcCrtFile(comp: *Compilation, crt_file: wasi_libc.CrtFile, prog_no
}
fn buildLibUnwind(comp: *Compilation, prog_node: std.Progress.Node) void {
- defer comp.link_task_queue.finishPrelinkItem(comp);
if (libunwind.buildStaticLib(comp, prog_node)) |_| {
comp.queued_jobs.libunwind = false;
} else |err| switch (err) {
@@ -6079,7 +6041,6 @@ fn buildLibUnwind(comp: *Compilation, prog_node: std.Progress.Node) void {
}
fn buildLibCxx(comp: *Compilation, prog_node: std.Progress.Node) void {
- defer comp.link_task_queue.finishPrelinkItem(comp);
if (libcxx.buildLibCxx(comp, prog_node)) |_| {
comp.queued_jobs.libcxx = false;
} else |err| switch (err) {
@@ -6089,7 +6050,6 @@ fn buildLibCxx(comp: *Compilation, prog_node: std.Progress.Node) void {
}
fn buildLibCxxAbi(comp: *Compilation, prog_node: std.Progress.Node) void {
- defer comp.link_task_queue.finishPrelinkItem(comp);
if (libcxx.buildLibCxxAbi(comp, prog_node)) |_| {
comp.queued_jobs.libcxxabi = false;
} else |err| switch (err) {
@@ -6099,7 +6059,6 @@ fn buildLibCxxAbi(comp: *Compilation, prog_node: std.Progress.Node) void {
}
fn buildLibTsan(comp: *Compilation, prog_node: std.Progress.Node) void {
- defer comp.link_task_queue.finishPrelinkItem(comp);
if (libtsan.buildTsan(comp, prog_node)) |_| {
comp.queued_jobs.libtsan = false;
} else |err| switch (err) {
@@ -6109,7 +6068,6 @@ fn buildLibTsan(comp: *Compilation, prog_node: std.Progress.Node) void {
}
fn buildLibZigC(comp: *Compilation, prog_node: std.Progress.Node) void {
- defer comp.link_task_queue.finishPrelinkItem(comp);
comp.buildOutputFromZig(
"c.zig",
"zigc",
@@ -6139,6 +6097,8 @@ fn reportRetryableWin32ResourceError(
win32_resource: *Win32Resource,
err: anyerror,
) error{OutOfMemory}!void {
+ const io = comp.io;
+
win32_resource.status = .failure_retryable;
var bundle: ErrorBundle.Wip = undefined;
@@ -6160,8 +6120,8 @@ fn reportRetryableWin32ResourceError(
});
const finished_bundle = try bundle.toOwnedBundle("");
{
- comp.mutex.lock();
- defer comp.mutex.unlock();
+ comp.mutex.lockUncancelable(io);
+ defer comp.mutex.unlock(io);
try comp.failed_win32_resources.putNoClobber(comp.gpa, win32_resource, finished_bundle);
}
}
@@ -6186,8 +6146,8 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: std.Pr
if (c_object.clearStatus(gpa)) {
// There was previous failure.
- comp.mutex.lock();
- defer comp.mutex.unlock();
+ comp.mutex.lockUncancelable(io);
+ defer comp.mutex.unlock(io);
// If the failure was OOM, there will not be an entry here, so we do
// not assert discard.
_ = comp.failed_c_objects.swapRemove(c_object);
@@ -6457,8 +6417,8 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: std.Pr
switch (comp.cache_use) {
.whole => |whole| {
if (whole.cache_manifest) |whole_cache_manifest| {
- whole.cache_manifest_mutex.lock();
- defer whole.cache_manifest_mutex.unlock();
+ try whole.cache_manifest_mutex.lock(io);
+ defer whole.cache_manifest_mutex.unlock(io);
try whole_cache_manifest.addDepFilePost(zig_cache_tmp_dir, dep_basename);
}
},
@@ -6503,7 +6463,7 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: std.Pr
},
};
- comp.queuePrelinkTasks(&.{.{ .load_object = c_object.status.success.object_path }});
+ try comp.queuePrelinkTasks(&.{.{ .load_object = c_object.status.success.object_path }});
}
fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32_resource_prog_node: std.Progress.Node) !void {
@@ -6517,6 +6477,8 @@ fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32
const tracy_trace = trace(@src());
defer tracy_trace.end();
+ const io = comp.io;
+
const src_path = switch (win32_resource.src) {
.rc => |rc_src| rc_src.src_path,
.manifest => |src_path| src_path,
@@ -6531,8 +6493,8 @@ fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32
if (win32_resource.clearStatus(comp.gpa)) {
// There was previous failure.
- comp.mutex.lock();
- defer comp.mutex.unlock();
+ comp.mutex.lockUncancelable(io);
+ defer comp.mutex.unlock(io);
// If the failure was OOM, there will not be an entry here, so we do
// not assert discard.
_ = comp.failed_win32_resources.swapRemove(win32_resource);
@@ -6706,8 +6668,8 @@ fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32
try man.addFilePost(dep_file_path);
switch (comp.cache_use) {
.whole => |whole| if (whole.cache_manifest) |whole_cache_manifest| {
- whole.cache_manifest_mutex.lock();
- defer whole.cache_manifest_mutex.unlock();
+ try whole.cache_manifest_mutex.lock(io);
+ defer whole.cache_manifest_mutex.unlock(io);
try whole_cache_manifest.addFilePost(dep_file_path);
},
.incremental, .none => {},
@@ -7428,8 +7390,9 @@ fn failCObjWithOwnedDiagBundle(
@branchHint(.cold);
assert(diag_bundle.diags.len > 0);
{
- comp.mutex.lock();
- defer comp.mutex.unlock();
+ const io = comp.io;
+ comp.mutex.lockUncancelable(io);
+ defer comp.mutex.unlock(io);
{
errdefer diag_bundle.destroy(comp.gpa);
try comp.failed_c_objects.ensureUnusedCapacity(comp.gpa, 1);
@@ -7470,8 +7433,9 @@ fn failWin32ResourceWithOwnedBundle(
) error{ OutOfMemory, AnalysisFail } {
@branchHint(.cold);
{
- comp.mutex.lock();
- defer comp.mutex.unlock();
+ const io = comp.io;
+ comp.mutex.lockUncancelable(io);
+ defer comp.mutex.unlock(io);
try comp.failed_win32_resources.putNoClobber(comp.gpa, win32_resource, err_bundle);
}
win32_resource.status = .failure;
@@ -7795,9 +7759,9 @@ pub fn lockAndSetMiscFailure(
comptime format: []const u8,
args: anytype,
) void {
- comp.mutex.lock();
- defer comp.mutex.unlock();
-
+ const io = comp.io;
+ comp.mutex.lockUncancelable(io);
+ defer comp.mutex.unlock(io);
return setMiscFailure(comp, tag, format, args);
}
@@ -7840,8 +7804,8 @@ pub fn updateSubCompilation(
defer errors.deinit(gpa);
if (errors.errorMessageCount() > 0) {
- parent_comp.mutex.lock();
- defer parent_comp.mutex.unlock();
+ parent_comp.mutex.lockUncancelable(parent_comp.io);
+ defer parent_comp.mutex.unlock(parent_comp.io);
try parent_comp.misc_failures.ensureUnusedCapacity(gpa, 1);
parent_comp.misc_failures.putAssumeCapacityNoClobber(misc_task, .{
.msg = try std.fmt.allocPrint(gpa, "sub-compilation of {t} failed", .{misc_task}),
@@ -7942,6 +7906,7 @@ fn buildOutputFromZig(
var sub_create_diag: CreateDiagnostic = undefined;
const sub_compilation = Compilation.create(gpa, arena, io, &sub_create_diag, .{
+ .thread_limit = comp.thread_limit,
.dirs = comp.dirs.withoutLocalCache(),
.cache_mode = .whole,
.parent_whole_cache = parent_whole_cache,
@@ -7949,7 +7914,6 @@ fn buildOutputFromZig(
.config = config,
.root_mod = root_mod,
.root_name = root_name,
- .thread_pool = comp.thread_pool,
.libc_installation = comp.libc_installation,
.emit_bin = .yes_cache,
.function_sections = true,
@@ -7980,7 +7944,7 @@ fn buildOutputFromZig(
assert(out.* == null);
out.* = crt_file;
- comp.queuePrelinkTaskMode(crt_file.full_object_path, &config);
+ try comp.queuePrelinkTaskMode(crt_file.full_object_path, &config);
}
pub const CrtFileOptions = struct {
@@ -8079,13 +8043,13 @@ pub fn build_crt_file(
var sub_create_diag: CreateDiagnostic = undefined;
const sub_compilation = Compilation.create(gpa, arena, io, &sub_create_diag, .{
+ .thread_limit = comp.thread_limit,
.dirs = comp.dirs.withoutLocalCache(),
.self_exe_path = comp.self_exe_path,
.cache_mode = .whole,
.config = config,
.root_mod = root_mod,
.root_name = root_name,
- .thread_pool = comp.thread_pool,
.libc_installation = comp.libc_installation,
.emit_bin = .yes_cache,
.function_sections = options.function_sections orelse false,
@@ -8114,18 +8078,18 @@ pub fn build_crt_file(
try comp.updateSubCompilation(sub_compilation, misc_task_tag, prog_node);
const crt_file = try sub_compilation.toCrtFile();
- comp.queuePrelinkTaskMode(crt_file.full_object_path, &config);
+ try comp.queuePrelinkTaskMode(crt_file.full_object_path, &config);
{
- comp.mutex.lock();
- defer comp.mutex.unlock();
+ comp.mutex.lockUncancelable(io);
+ defer comp.mutex.unlock(io);
try comp.crt_files.ensureUnusedCapacity(gpa, 1);
comp.crt_files.putAssumeCapacityNoClobber(basename, crt_file);
}
}
-pub fn queuePrelinkTaskMode(comp: *Compilation, path: Cache.Path, config: *const Compilation.Config) void {
- comp.queuePrelinkTasks(switch (config.output_mode) {
+pub fn queuePrelinkTaskMode(comp: *Compilation, path: Cache.Path, config: *const Compilation.Config) Io.Cancelable!void {
+ try comp.queuePrelinkTasks(switch (config.output_mode) {
.Exe => unreachable,
.Obj => &.{.{ .load_object = path }},
.Lib => &.{switch (config.link_mode) {
@@ -8135,33 +8099,10 @@ pub fn queuePrelinkTaskMode(comp: *Compilation, path: Cache.Path, config: *const
});
}
-/// Only valid to call during `update`. Automatically handles queuing up a
-/// linker worker task if there is not already one.
-pub fn queuePrelinkTasks(comp: *Compilation, tasks: []const link.PrelinkTask) void {
+/// Only valid to call during `update`.
+pub fn queuePrelinkTasks(comp: *Compilation, tasks: []const link.PrelinkTask) Io.Cancelable!void {
comp.link_prog_node.increaseEstimatedTotalItems(tasks.len);
- comp.link_task_queue.enqueuePrelink(comp, tasks) catch |err| switch (err) {
- error.OutOfMemory => return comp.setAllocFailure(),
- };
-}
-
-/// The reason for the double-queue here is that the first queue ensures any
-/// resolve_type_fully tasks are complete before this dispatch function is called.
-fn dispatchZcuLinkTask(comp: *Compilation, tid: usize, task: link.ZcuTask) void {
- if (!comp.separateCodegenThreadOk()) {
- assert(tid == 0);
- if (task == .link_func) {
- assert(task.link_func.mir.status.load(.monotonic) != .pending);
- }
- link.doZcuTask(comp, tid, task);
- task.deinit(comp.zcu.?);
- return;
- }
- comp.link_task_queue.enqueueZcu(comp, task) catch |err| switch (err) {
- error.OutOfMemory => {
- task.deinit(comp.zcu.?);
- comp.setAllocFailure();
- },
- };
+ try comp.link_queue.enqueuePrelink(comp, tasks);
}
pub fn toCrtFile(comp: *Compilation) Allocator.Error!CrtFile {
@@ -8251,3 +8192,17 @@ pub fn compilerRtOptMode(comp: Compilation) std.builtin.OptimizeMode {
pub fn compilerRtStrip(comp: Compilation) bool {
return comp.root_mod.strip;
}
+
+/// This is a temporary workaround put in place to migrate from `std.Thread.Pool`
+/// to `std.Io.Threaded` for asynchronous/concurrent work. The eventual solution
+/// will likely involve significant changes to the `InternPool` implementation.
+pub fn getTid() usize {
+ if (my_tid == null) my_tid = next_tid.fetchAdd(1, .monotonic);
+ return my_tid.?;
+}
+pub fn setMainThread() void {
+ my_tid = 0;
+}
+/// TID 0 is reserved for the main thread.
+var next_tid: std.atomic.Value(usize) = .init(1);
+threadlocal var my_tid: ?usize = null;
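
A sketch of how these pieces fit together; the `worker` function is hypothetical,
and only `setMainThread` and `getTid` come from the code above. The main thread
claims TID 0 before spawning anything, and each worker lazily claims the next
integer on first use, the threadlocal cache making repeated calls free:

    const std = @import("std");
    const Compilation = @import("Compilation.zig");

    pub fn main() void {
        Compilation.setMainThread(); // must run before any worker calls getTid()
        // ... spawn concurrent work ...
    }

    fn worker() void {
        // The first call on this thread performs one fetchAdd;
        // every later call returns the cached threadlocal value.
        const tid = Compilation.getTid();
        std.debug.assert(tid != 0); // 0 belongs to the main thread
    }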
diff --git a/src/IncrementalDebugServer.zig b/src/IncrementalDebugServer.zig
index d376ec146e..b4bc2c812e 100644
--- a/src/IncrementalDebugServer.zig
+++ b/src/IncrementalDebugServer.zig
@@ -14,57 +14,122 @@ comptime {
}
zcu: *Zcu,
-thread: ?std.Thread,
-running: std.atomic.Value(bool),
+future: ?Io.Future(void),
/// Held by our owner when an update is in-progress, and held by us when responding to a command.
/// So, essentially guards all access to `Compilation`, including `Zcu`.
-mutex: std.Thread.Mutex,
+mutex: std.Io.Mutex,
pub fn init(zcu: *Zcu) IncrementalDebugServer {
return .{
.zcu = zcu,
- .thread = null,
- .running = .init(true),
- .mutex = .{},
+ .future = null,
+ .mutex = .init,
};
}
pub fn deinit(ids: *IncrementalDebugServer) void {
- if (ids.thread) |t| {
- ids.running.store(false, .monotonic);
- t.join();
- }
+ const io = ids.zcu.comp.io;
+ if (ids.future) |*f| f.cancel(io);
}
const port = 7623;
pub fn spawn(ids: *IncrementalDebugServer) void {
+ const io = ids.zcu.comp.io;
std.debug.print("spawning incremental debug server on port {d}\n", .{port});
- ids.thread = std.Thread.spawn(.{ .allocator = ids.zcu.comp.arena }, runThread, .{ids}) catch |err|
- std.process.fatal("failed to spawn incremental debug server: {s}", .{@errorName(err)});
+ ids.future = io.concurrent(runServer, .{ids}) catch |err|
+ std.process.fatal("failed to start incremental debug server: {s}", .{@errorName(err)});
}
-fn runThread(ids: *IncrementalDebugServer) void {
- const gpa = ids.zcu.gpa;
+fn runServer(ids: *IncrementalDebugServer) void {
const io = ids.zcu.comp.io;
- var cmd_buf: [1024]u8 = undefined;
- var text_out: std.ArrayList(u8) = .empty;
- defer text_out.deinit(gpa);
-
- const addr: std.Io.net.IpAddress = .{ .ip6 = .loopback(port) };
- var server = addr.listen(io, .{}) catch @panic("IncrementalDebugServer: failed to listen");
+ const addr: Io.net.IpAddress = .{ .ip6 = .loopback(port) };
+ var server = addr.listen(io, .{}) catch |err| switch (err) {
+ error.Canceled => return,
+ else => |e| {
+ log.err("listen failed ({t}); closing server", .{e});
+ return;
+ },
+ };
defer server.deinit(io);
- var stream = server.accept(io) catch @panic("IncrementalDebugServer: failed to accept");
- defer stream.close(io);
- var stream_reader = stream.reader(io, &cmd_buf);
- var stream_writer = stream.writer(io, &.{});
+ while (true) {
+ var stream = server.accept(io) catch |err| switch (err) {
+ error.Canceled => return,
+ error.ConnectionAborted => {
+ log.warn("client disconnected during accept", .{});
+ continue;
+ },
+ else => |e| {
+ log.err("accept failed ({t})", .{e});
+ return;
+ },
+ };
+ defer stream.close(io);
+ log.info("client '{f}' connected", .{stream.socket.address});
+ var cmd_buf: [1024]u8 = undefined;
+ var reader = stream.reader(io, &cmd_buf);
+ var writer = stream.writer(io, &.{});
+ ids.serveStream(&reader.interface, &writer.interface) catch |orig_err| {
+ const actual_err = switch (orig_err) {
+ error.Canceled,
+ error.OutOfMemory,
+ error.EndOfStream,
+ error.StreamTooLong,
+ => |e| e,
+
+ error.ReadFailed => reader.err.?,
+ error.WriteFailed => writer.err.?,
+ };
+ switch (actual_err) {
+ error.Canceled => return,
+
+ error.OutOfMemory,
+ error.Unexpected,
+ error.SystemResources,
+ error.Timeout,
+ error.NetworkDown,
+ error.NetworkUnreachable,
+ error.HostUnreachable,
+ error.FastOpenAlreadyInProgress,
+ error.ConnectionRefused,
+ error.StreamTooLong,
+ => |e| log.err("failed to serve '{f}' ({t})", .{ stream.socket.address, e }),
+
+ error.EndOfStream,
+ error.ConnectionResetByPeer,
+ => log.info("client '{f}' disconnected", .{stream.socket.address}),
- while (ids.running.load(.monotonic)) {
- stream_writer.interface.writeAll("zig> ") catch @panic("IncrementalDebugServer: failed to write");
- const untrimmed = stream_reader.interface.takeSentinel('\n') catch |err| switch (err) {
- error.EndOfStream => break,
- else => @panic("IncrementalDebugServer: failed to read command"),
+ error.AddressFamilyUnsupported,
+ error.SocketUnconnected,
+ error.SocketNotBound,
+ error.AccessDenied,
+ => unreachable,
+ }
};
+ }
+}
+
+fn serveStream(
+ ids: *IncrementalDebugServer,
+ stream_reader: *Io.Reader,
+ stream_writer: *Io.Writer,
+) error{
+ Canceled,
+ OutOfMemory,
+ EndOfStream,
+ StreamTooLong,
+ ReadFailed,
+ WriteFailed,
+}!noreturn {
+ const gpa = ids.zcu.gpa;
+ const io = ids.zcu.comp.io;
+
+ var text_out: std.ArrayList(u8) = .empty;
+ defer text_out.deinit(gpa);
+
+ while (true) {
+ try stream_writer.writeAll("zig> ");
+ const untrimmed = try stream_reader.takeSentinel('\n');
const cmd_and_arg = std.mem.trim(u8, untrimmed, " \t\r\n");
const cmd: []const u8, const arg: []const u8 = if (std.mem.indexOfScalar(u8, cmd_and_arg, ' ')) |i|
.{ cmd_and_arg[0..i], cmd_and_arg[i + 1 ..] }
@@ -74,18 +139,21 @@ fn runThread(ids: *IncrementalDebugServer) void {
text_out.clearRetainingCapacity();
{
if (!ids.mutex.tryLock()) {
- stream_writer.interface.writeAll("waiting for in-progress update to finish...\n") catch @panic("IncrementalDebugServer: failed to write");
- ids.mutex.lock();
+ try stream_writer.writeAll("waiting for in-progress update to finish...\n");
+ try ids.mutex.lock(io);
}
- defer ids.mutex.unlock();
- var allocating: std.Io.Writer.Allocating = .fromArrayList(gpa, &text_out);
+ defer ids.mutex.unlock(io);
+ var allocating: Io.Writer.Allocating = .fromArrayList(gpa, &text_out);
defer text_out = allocating.toArrayList();
- handleCommand(ids.zcu, &allocating.writer, cmd, arg) catch @panic("IncrementalDebugServer: out of memory");
+ handleCommand(ids.zcu, &allocating.writer, cmd, arg) catch |err| switch (err) {
+ error.OutOfMemory,
+ error.WriteFailed,
+ => return error.OutOfMemory,
+ };
}
- text_out.append(gpa, '\n') catch @panic("IncrementalDebugServer: out of memory");
- stream_writer.interface.writeAll(text_out.items) catch @panic("IncrementalDebugServer: failed to write");
+ try text_out.append(gpa, '\n');
+ try stream_writer.writeAll(text_out.items);
}
- std.debug.print("closing incremental debug server\n", .{});
}
const help_str: []const u8 =
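
The server lifecycle above swaps a manually joined `std.Thread` for an
`Io.Future`: `io.concurrent` starts the task, and `cancel` both requests
cancellation and awaits completion, which is why `deinit` no longer needs an
atomic `running` flag. A minimal sketch of the shape, assuming an `Io`
implementation that supports `concurrent`:

    const std = @import("std");
    const Io = std.Io;

    const Server = struct {
        future: ?Io.Future(void) = null,

        fn run(_: *Server) void {
            // Long-running work; blocking Io calls in here start returning
            // error.Canceled once cancel() has been requested.
        }

        fn start(s: *Server, io: Io) !void {
            s.future = try io.concurrent(run, .{s});
        }

        fn stop(s: *Server, io: Io) void {
            // cancel() requests cancellation and waits for run() to return.
            if (s.future) |*f| f.cancel(io);
        }
    };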
@@ -123,7 +191,7 @@ const help_str: []const u8 =
\\
;
-fn handleCommand(zcu: *Zcu, w: *std.Io.Writer, cmd_str: []const u8, arg_str: []const u8) error{ WriteFailed, OutOfMemory }!void {
+fn handleCommand(zcu: *Zcu, w: *Io.Writer, cmd_str: []const u8, arg_str: []const u8) error{ WriteFailed, OutOfMemory }!void {
const ip = &zcu.intern_pool;
if (std.mem.eql(u8, cmd_str, "help")) {
try w.writeAll(help_str);
@@ -328,7 +396,8 @@ fn printAnalUnit(unit: AnalUnit, buf: *[32]u8) []const u8 {
};
return std.fmt.bufPrint(buf, "{s} {d}", .{ @tagName(unit.unwrap()), idx }) catch unreachable;
}
-fn printType(ty: Type, zcu: *const Zcu, w: anytype) !void {
+
+fn printType(ty: Type, zcu: *const Zcu, w: *Io.Writer) Io.Writer.Error!void {
const ip = &zcu.intern_pool;
switch (ip.indexToKey(ty.toIntern())) {
.int_type => |int| try w.print("{c}{d}", .{
@@ -377,6 +446,7 @@ fn printType(ty: Type, zcu: *const Zcu, w: anytype) !void {
const std = @import("std");
const Io = std.Io;
const Allocator = std.mem.Allocator;
+const log = std.log.scoped(.incremental_debug_server);
const Compilation = @import("Compilation.zig");
const Zcu = @import("Zcu.zig");
diff --git a/src/InternPool.zig b/src/InternPool.zig
index 302e71a2b1..5568c493d9 100644
--- a/src/InternPool.zig
+++ b/src/InternPool.zig
@@ -8,6 +8,7 @@ const assert = std.debug.assert;
const BigIntConst = std.math.big.int.Const;
const BigIntMutable = std.math.big.int.Mutable;
const Cache = std.Build.Cache;
+const Io = std.Io;
const Limb = std.math.big.Limb;
const Hash = std.hash.Wyhash;
@@ -214,6 +215,7 @@ pub const TrackedInst = extern struct {
pub fn trackZir(
ip: *InternPool,
gpa: Allocator,
+ io: Io,
tid: Zcu.PerThread.Id,
key: TrackedInst,
) Allocator.Error!TrackedInst.Index {
@@ -235,8 +237,8 @@ pub fn trackZir(
if (entry.hash != hash) continue;
if (std.meta.eql(index.resolveFull(ip) orelse continue, key)) return index;
}
- shard.mutate.tracked_inst_map.mutex.lock();
- defer shard.mutate.tracked_inst_map.mutex.unlock();
+ shard.mutate.tracked_inst_map.mutex.lock(io, tid);
+ defer shard.mutate.tracked_inst_map.mutex.unlock(io);
if (map.entries != shard.shared.tracked_inst_map.entries) {
map = shard.shared.tracked_inst_map;
map_mask = map.header().mask();
@@ -251,7 +253,7 @@ pub fn trackZir(
}
defer shard.mutate.tracked_inst_map.len += 1;
const local = ip.getLocal(tid);
- const list = local.getMutableTrackedInsts(gpa);
+ const list = local.getMutableTrackedInsts(gpa, io);
try list.ensureUnusedCapacity(1);
const map_header = map.header().*;
if (shard.mutate.tracked_inst_map.len < map_header.capacity * 3 / 5) {
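
`trackZir` uses a double-checked lookup: probe the map snapshot without the lock,
and only on a miss take the shard mutex, re-read the (possibly replaced) map, and
re-scan before inserting. The sketch below shows only that control flow and is
schematic: the real code can afford the unlocked probe because its maps are
append-only with atomically published entries, whereas the plain hash map here
would race under real concurrent use:

    const std = @import("std");
    const Io = std.Io;

    const Cache = struct {
        mutex: Io.Mutex = .init,
        map: std.AutoHashMapUnmanaged(u64, u32) = .empty,

        fn getOrInsert(c: *Cache, gpa: std.mem.Allocator, io: Io, key: u64, value: u32) !u32 {
            if (c.map.get(key)) |v| return v; // optimistic probe, no lock
            c.mutex.lockUncancelable(io);
            defer c.mutex.unlock(io);
            if (c.map.get(key)) |v| return v; // re-check: another thread may have inserted
            try c.map.put(gpa, key, value); // still absent: insert under the lock
            return value;
        }
    };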
@@ -317,6 +319,7 @@ pub fn trackZir(
pub fn rehashTrackedInsts(
ip: *InternPool,
gpa: Allocator,
+ io: Io,
tid: Zcu.PerThread.Id,
) Allocator.Error!void {
assert(tid == .main); // we shouldn't have any other threads active right now
@@ -333,7 +336,7 @@ pub fn rehashTrackedInsts(
for (ip.locals) |*local| {
// `getMutableTrackedInsts` is okay only because no other thread is currently active.
// We need the `mutate` for the len.
- for (local.getMutableTrackedInsts(gpa).viewAllowEmpty().items(.@"0")) |tracked_inst| {
+ for (local.getMutableTrackedInsts(gpa, io).viewAllowEmpty().items(.@"0")) |tracked_inst| {
if (tracked_inst.inst == .lost) continue; // we can ignore this one!
const full_hash = Hash.hash(0, std.mem.asBytes(&tracked_inst));
const shard = &ip.shards[@intCast(full_hash & (ip.shards.len - 1))];
@@ -379,7 +382,7 @@ pub fn rehashTrackedInsts(
for (ip.locals, 0..) |*local, local_tid| {
// `getMutableTrackedInsts` is okay only because no other thread is currently active.
// We need the `mutate` for the len.
- for (local.getMutableTrackedInsts(gpa).viewAllowEmpty().items(.@"0"), 0..) |tracked_inst, local_inst_index| {
+ for (local.getMutableTrackedInsts(gpa, io).viewAllowEmpty().items(.@"0"), 0..) |tracked_inst, local_inst_index| {
if (tracked_inst.inst == .lost) continue; // we can ignore this one!
const full_hash = Hash.hash(0, std.mem.asBytes(&tracked_inst));
const hash: u32 = @truncate(full_hash >> 32);
@@ -1113,11 +1116,11 @@ const Local = struct {
const Namespaces = List(struct { *[1 << namespaces_bucket_width]Zcu.Namespace });
const ListMutate = struct {
- mutex: std.Thread.Mutex,
+ mutex: Io.Mutex,
len: u32,
const empty: ListMutate = .{
- .mutex = .{},
+ .mutex = .init,
.len = 0,
};
};
@@ -1144,6 +1147,7 @@ const Local = struct {
const ListSelf = @This();
const Mutable = struct {
gpa: Allocator,
+ io: Io,
arena: *std.heap.ArenaAllocator.State,
mutate: *ListMutate,
list: *ListSelf,
@@ -1296,6 +1300,7 @@ const Local = struct {
}
fn setCapacity(mutable: Mutable, capacity: u32) Allocator.Error!void {
+ const io = mutable.io;
var arena = mutable.arena.promote(mutable.gpa);
defer mutable.arena.* = arena.state;
const buf = try arena.allocator().alignedAlloc(
@@ -1313,8 +1318,8 @@ const Local = struct {
const new_slice = new_list.view().slice();
inline for (fields) |field| @memcpy(new_slice.items(field)[0..len], old_slice.items(field)[0..len]);
}
- mutable.mutate.mutex.lock();
- defer mutable.mutate.mutex.unlock();
+ mutable.mutate.mutex.lockUncancelable(io);
+ defer mutable.mutate.mutex.unlock(io);
mutable.list.release(new_list);
}
@@ -1375,18 +1380,20 @@ const Local = struct {
};
}
- pub fn getMutableItems(local: *Local, gpa: Allocator) List(Item).Mutable {
+ pub fn getMutableItems(local: *Local, gpa: Allocator, io: Io) List(Item).Mutable {
return .{
.gpa = gpa,
+ .io = io,
.arena = &local.mutate.arena,
.mutate = &local.mutate.items,
.list = &local.shared.items,
};
}
- pub fn getMutableExtra(local: *Local, gpa: Allocator) Extra.Mutable {
+ pub fn getMutableExtra(local: *Local, gpa: Allocator, io: Io) Extra.Mutable {
return .{
.gpa = gpa,
+ .io = io,
.arena = &local.mutate.arena,
.mutate = &local.mutate.extra,
.list = &local.shared.extra,
@@ -1397,11 +1404,12 @@ const Local = struct {
/// On 64-bit systems, this array is used for big integers and associated metadata.
/// Use the helper methods instead of accessing this directly in order to not
/// violate the above mechanism.
- pub fn getMutableLimbs(local: *Local, gpa: Allocator) Limbs.Mutable {
+ pub fn getMutableLimbs(local: *Local, gpa: Allocator, io: Io) Limbs.Mutable {
return switch (@sizeOf(Limb)) {
- @sizeOf(u32) => local.getMutableExtra(gpa),
+ @sizeOf(u32) => local.getMutableExtra(gpa, io),
@sizeOf(u64) => .{
.gpa = gpa,
+ .io = io,
.arena = &local.mutate.arena,
.mutate = &local.mutate.limbs,
.list = &local.shared.limbs,
@@ -1411,9 +1419,10 @@ const Local = struct {
}
/// A list of offsets into `string_bytes` for each string.
- pub fn getMutableStrings(local: *Local, gpa: Allocator) Strings.Mutable {
+ pub fn getMutableStrings(local: *Local, gpa: Allocator, io: Io) Strings.Mutable {
return .{
.gpa = gpa,
+ .io = io,
.arena = &local.mutate.arena,
.mutate = &local.mutate.strings,
.list = &local.shared.strings,
@@ -1425,9 +1434,10 @@ const Local = struct {
/// is referencing the data here whether they want to store both index and length,
/// thus allowing null bytes, or store only index, and use null-termination. The
/// `strings_bytes` array is agnostic to either usage.
- pub fn getMutableStringBytes(local: *Local, gpa: Allocator) StringBytes.Mutable {
+ pub fn getMutableStringBytes(local: *Local, gpa: Allocator, io: Io) StringBytes.Mutable {
return .{
.gpa = gpa,
+ .io = io,
.arena = &local.mutate.arena,
.mutate = &local.mutate.string_bytes,
.list = &local.shared.string_bytes,
@@ -1436,9 +1446,10 @@ const Local = struct {
/// An index into `tracked_insts` gives a reference to a single ZIR instruction which
/// persists across incremental updates.
- pub fn getMutableTrackedInsts(local: *Local, gpa: Allocator) TrackedInsts.Mutable {
+ pub fn getMutableTrackedInsts(local: *Local, gpa: Allocator, io: Io) TrackedInsts.Mutable {
return .{
.gpa = gpa,
+ .io = io,
.arena = &local.mutate.arena,
.mutate = &local.mutate.tracked_insts,
.list = &local.shared.tracked_insts,
@@ -1452,9 +1463,10 @@ const Local = struct {
///
/// Key is the hash of the path to this file, used to store
/// `InternPool.TrackedInst`.
- pub fn getMutableFiles(local: *Local, gpa: Allocator) List(File).Mutable {
+ pub fn getMutableFiles(local: *Local, gpa: Allocator, io: Io) List(File).Mutable {
return .{
.gpa = gpa,
+ .io = io,
.arena = &local.mutate.arena,
.mutate = &local.mutate.files,
.list = &local.shared.files,
@@ -1466,27 +1478,30 @@ const Local = struct {
/// field names and values directly, relying on one of these maps, stored separately,
/// to provide lookup.
/// These are not serialized; it is computed upon deserialization.
- pub fn getMutableMaps(local: *Local, gpa: Allocator) Maps.Mutable {
+ pub fn getMutableMaps(local: *Local, gpa: Allocator, io: Io) Maps.Mutable {
return .{
.gpa = gpa,
+ .io = io,
.arena = &local.mutate.arena,
.mutate = &local.mutate.maps,
.list = &local.shared.maps,
};
}
- pub fn getMutableNavs(local: *Local, gpa: Allocator) Navs.Mutable {
+ pub fn getMutableNavs(local: *Local, gpa: Allocator, io: Io) Navs.Mutable {
return .{
.gpa = gpa,
+ .io = io,
.arena = &local.mutate.arena,
.mutate = &local.mutate.navs,
.list = &local.shared.navs,
};
}
- pub fn getMutableComptimeUnits(local: *Local, gpa: Allocator) ComptimeUnits.Mutable {
+ pub fn getMutableComptimeUnits(local: *Local, gpa: Allocator, io: Io) ComptimeUnits.Mutable {
return .{
.gpa = gpa,
+ .io = io,
.arena = &local.mutate.arena,
.mutate = &local.mutate.comptime_units,
.list = &local.shared.comptime_units,
@@ -1503,9 +1518,10 @@ const Local = struct {
/// serialization trivial.
/// * It provides a unique integer to be used for anonymous symbol names, avoiding
/// multi-threaded contention on an atomic counter.
- pub fn getMutableNamespaces(local: *Local, gpa: Allocator) Namespaces.Mutable {
+ pub fn getMutableNamespaces(local: *Local, gpa: Allocator, io: Io) Namespaces.Mutable {
return .{
.gpa = gpa,
+ .io = io,
.arena = &local.mutate.arena,
.mutate = &local.mutate.namespaces.buckets_list,
.list = &local.shared.namespaces,
@@ -1535,11 +1551,63 @@ const Shard = struct {
},
const Mutate = struct {
- mutex: std.Thread.Mutex.Recursive,
+ /// This mutex needs to be recursive because `getFuncDeclIes` interns multiple things at
+ /// once (the function, its IES, the corresponding error union, and the resulting function
+ /// type), so it calls `getOrPutKeyEnsuringAdditionalCapacity` multiple times. Each of these
+ /// calls acquires a lock which will only be released when the whole operation is finalized,
+ /// and these different items could be in the same shard, in which case that shard's lock
+ /// will be acquired multiple times.
+ mutex: RecursiveMutex,
len: u32,
+ const RecursiveMutex = struct {
+ const OptionalTid = if (single_threaded) enum(u8) {
+ null,
+ main,
+ fn unwrap(ot: OptionalTid) ?Zcu.PerThread.Id {
+ return switch (ot) {
+ .null => null,
+ .main => .main,
+ };
+ }
+ fn wrap(tid: Zcu.PerThread.Id) OptionalTid {
+ comptime assert(tid == .main);
+ return .main;
+ }
+ } else packed struct(u8) {
+ non_null: bool,
+ value: Zcu.PerThread.Id,
+ const @"null": OptionalTid = .{ .non_null = false, .value = .main };
+ fn unwrap(ot: OptionalTid) ?Zcu.PerThread.Id {
+ return if (ot.non_null) ot.value else null;
+ }
+ fn wrap(tid: Zcu.PerThread.Id) OptionalTid {
+ return .{ .non_null = true, .value = tid };
+ }
+ };
+ mutex: Io.Mutex,
+ tid: std.atomic.Value(OptionalTid),
+ lock_count: u32,
+ const init: RecursiveMutex = .{ .mutex = .init, .tid = .init(.null), .lock_count = 0 };
+ fn lock(r: *RecursiveMutex, io: Io, tid: Zcu.PerThread.Id) void {
+ if (r.tid.load(.monotonic) != OptionalTid.wrap(tid)) {
+ r.mutex.lockUncancelable(io);
+ assert(r.lock_count == 0);
+ r.tid.store(.wrap(tid), .monotonic);
+ }
+ r.lock_count += 1;
+ }
+ fn unlock(r: *RecursiveMutex, io: Io) void {
+ r.lock_count -= 1;
+ if (r.lock_count == 0) {
+ r.tid.store(.null, .monotonic);
+ r.mutex.unlock(io);
+ }
+ }
+ };
+
const empty: Mutate = .{
- .mutex = std.Thread.Mutex.Recursive.init,
+ .mutex = .init,
.len = 0,
};
};
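
Schematically, the re-entrancy this buys, using the `RecursiveMutex` defined
above (the function and its arguments are illustrative):

    fn internTwoItemsInOneShard(io: Io, tid: Zcu.PerThread.Id, rm: *RecursiveMutex) void {
        rm.lock(io, tid); // first acquisition: takes the underlying Io.Mutex
        defer rm.unlock(io); // outer unlock: count reaches 0, Io.Mutex released

        rm.lock(io, tid); // same tid re-entering: only bumps lock_count
        defer rm.unlock(io); // inner unlock (runs first): only decrements the count

        // ... both tentative entries are published while the shard is held ...
    }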
@@ -1896,7 +1964,7 @@ pub const NullTerminatedString = enum(u32) {
ip: *const InternPool,
id: bool,
};
- fn format(data: FormatData, writer: *std.Io.Writer) std.Io.Writer.Error!void {
+ fn format(data: FormatData, writer: *Io.Writer) Io.Writer.Error!void {
const slice = data.string.toSlice(data.ip);
if (!data.id) {
try writer.writeAll(slice);
@@ -2323,10 +2391,10 @@ pub const Key = union(enum) {
return @atomicLoad(FuncAnalysis, func.analysisPtr(ip), .unordered);
}
- pub fn setBranchHint(func: Func, ip: *InternPool, hint: std.builtin.BranchHint) void {
+ pub fn setBranchHint(func: Func, ip: *InternPool, io: Io, hint: std.builtin.BranchHint) void {
const extra_mutex = &ip.getLocal(func.tid).mutate.extra.mutex;
- extra_mutex.lock();
- defer extra_mutex.unlock();
+ extra_mutex.lockUncancelable(io);
+ defer extra_mutex.unlock(io);
const analysis_ptr = func.analysisPtr(ip);
var analysis = analysis_ptr.*;
@@ -2334,10 +2402,10 @@ pub const Key = union(enum) {
@atomicStore(FuncAnalysis, analysis_ptr, analysis, .release);
}
- pub fn setAnalyzed(func: Func, ip: *InternPool) void {
+ pub fn setAnalyzed(func: Func, ip: *InternPool, io: Io) void {
const extra_mutex = &ip.getLocal(func.tid).mutate.extra.mutex;
- extra_mutex.lock();
- defer extra_mutex.unlock();
+ extra_mutex.lockUncancelable(io);
+ defer extra_mutex.unlock(io);
const analysis_ptr = func.analysisPtr(ip);
var analysis = analysis_ptr.*;
@@ -2365,10 +2433,10 @@ pub const Key = union(enum) {
return @atomicLoad(u32, func.branchQuotaPtr(ip), .unordered);
}
- pub fn maxBranchQuota(func: Func, ip: *InternPool, new_branch_quota: u32) void {
+ pub fn maxBranchQuota(func: Func, ip: *InternPool, io: Io, new_branch_quota: u32) void {
const extra_mutex = &ip.getLocal(func.tid).mutate.extra.mutex;
- extra_mutex.lock();
- defer extra_mutex.unlock();
+ extra_mutex.lockUncancelable(io);
+ defer extra_mutex.unlock(io);
const branch_quota_ptr = func.branchQuotaPtr(ip);
@atomicStore(u32, branch_quota_ptr, @max(branch_quota_ptr.*, new_branch_quota), .release);
@@ -2385,10 +2453,10 @@ pub const Key = union(enum) {
return @atomicLoad(Index, func.resolvedErrorSetPtr(ip), .unordered);
}
- pub fn setResolvedErrorSet(func: Func, ip: *InternPool, ies: Index) void {
+ pub fn setResolvedErrorSet(func: Func, ip: *InternPool, io: Io, ies: Index) void {
const extra_mutex = &ip.getLocal(func.tid).mutate.extra.mutex;
- extra_mutex.lock();
- defer extra_mutex.unlock();
+ extra_mutex.lockUncancelable(io);
+ defer extra_mutex.unlock(io);
@atomicStore(Index, func.resolvedErrorSetPtr(ip), ies, .release);
}
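
All of these setters share one pattern: readers load the flags with an
unsynchronized `@atomicLoad` and no lock, so writers serialize the
read-modify-write on the per-thread `extra` mutex and publish with a `.release`
store. A self-contained sketch of the pattern (the `Flags` layout is invented
for illustration):

    const std = @import("std");
    const Io = std.Io;

    const Flags = packed struct(u32) { analyzed: bool = false, _unused: u31 = 0 };

    fn setAnalyzed(io: Io, mutex: *Io.Mutex, flags_ptr: *Flags) void {
        mutex.lockUncancelable(io);
        defer mutex.unlock(io);
        var flags = flags_ptr.*; // plain read is fine: only lock holders write
        flags.analyzed = true;
        @atomicStore(Flags, flags_ptr, flags, .release);
    }

    fn isAnalyzed(flags_ptr: *const Flags) bool {
        // Readers never take the mutex; they rely on the release store.
        return @atomicLoad(Flags, flags_ptr, .unordered).analyzed;
    }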
@@ -3349,10 +3417,10 @@ pub const LoadedUnionType = struct {
return @atomicLoad(Index, u.tagTypePtr(ip), .unordered);
}
- pub fn setTagType(u: LoadedUnionType, ip: *InternPool, tag_type: Index) void {
+ pub fn setTagType(u: LoadedUnionType, ip: *InternPool, io: Io, tag_type: Index) void {
const extra_mutex = &ip.getLocal(u.tid).mutate.extra.mutex;
- extra_mutex.lock();
- defer extra_mutex.unlock();
+ extra_mutex.lockUncancelable(io);
+ defer extra_mutex.unlock(io);
@atomicStore(Index, u.tagTypePtr(ip), tag_type, .release);
}
@@ -3368,10 +3436,10 @@ pub const LoadedUnionType = struct {
return @atomicLoad(Tag.TypeUnion.Flags, u.flagsPtr(ip), .unordered);
}
- pub fn setStatus(u: LoadedUnionType, ip: *InternPool, status: Status) void {
+ pub fn setStatus(u: LoadedUnionType, ip: *InternPool, io: Io, status: Status) void {
const extra_mutex = &ip.getLocal(u.tid).mutate.extra.mutex;
- extra_mutex.lock();
- defer extra_mutex.unlock();
+ extra_mutex.lockUncancelable(io);
+ defer extra_mutex.unlock(io);
const flags_ptr = u.flagsPtr(ip);
var flags = flags_ptr.*;
@@ -3379,10 +3447,10 @@ pub const LoadedUnionType = struct {
@atomicStore(Tag.TypeUnion.Flags, flags_ptr, flags, .release);
}
- pub fn setStatusIfLayoutWip(u: LoadedUnionType, ip: *InternPool, status: Status) void {
+ pub fn setStatusIfLayoutWip(u: LoadedUnionType, ip: *InternPool, io: Io, status: Status) void {
const extra_mutex = &ip.getLocal(u.tid).mutate.extra.mutex;
- extra_mutex.lock();
- defer extra_mutex.unlock();
+ extra_mutex.lockUncancelable(io);
+ defer extra_mutex.unlock(io);
const flags_ptr = u.flagsPtr(ip);
var flags = flags_ptr.*;
@@ -3390,10 +3458,10 @@ pub const LoadedUnionType = struct {
@atomicStore(Tag.TypeUnion.Flags, flags_ptr, flags, .release);
}
- pub fn setAlignment(u: LoadedUnionType, ip: *InternPool, alignment: Alignment) void {
+ pub fn setAlignment(u: LoadedUnionType, ip: *InternPool, io: Io, alignment: Alignment) void {
const extra_mutex = &ip.getLocal(u.tid).mutate.extra.mutex;
- extra_mutex.lock();
- defer extra_mutex.unlock();
+ extra_mutex.lockUncancelable(io);
+ defer extra_mutex.unlock(io);
const flags_ptr = u.flagsPtr(ip);
var flags = flags_ptr.*;
@@ -3401,10 +3469,10 @@ pub const LoadedUnionType = struct {
@atomicStore(Tag.TypeUnion.Flags, flags_ptr, flags, .release);
}
- pub fn assumeRuntimeBitsIfFieldTypesWip(u: LoadedUnionType, ip: *InternPool) bool {
+ pub fn assumeRuntimeBitsIfFieldTypesWip(u: LoadedUnionType, ip: *InternPool, io: Io) bool {
const extra_mutex = &ip.getLocal(u.tid).mutate.extra.mutex;
- extra_mutex.lock();
- defer extra_mutex.unlock();
+ extra_mutex.lockUncancelable(io);
+ defer extra_mutex.unlock(io);
const flags_ptr = u.flagsPtr(ip);
var flags = flags_ptr.*;
@@ -3419,10 +3487,10 @@ pub const LoadedUnionType = struct {
return u.flagsUnordered(ip).requires_comptime;
}
- pub fn setRequiresComptimeWip(u: LoadedUnionType, ip: *InternPool) RequiresComptime {
+ pub fn setRequiresComptimeWip(u: LoadedUnionType, ip: *InternPool, io: Io) RequiresComptime {
const extra_mutex = &ip.getLocal(u.tid).mutate.extra.mutex;
- extra_mutex.lock();
- defer extra_mutex.unlock();
+ extra_mutex.lockUncancelable(io);
+ defer extra_mutex.unlock(io);
const flags_ptr = u.flagsPtr(ip);
var flags = flags_ptr.*;
@@ -3433,12 +3501,12 @@ pub const LoadedUnionType = struct {
return flags.requires_comptime;
}
- pub fn setRequiresComptime(u: LoadedUnionType, ip: *InternPool, requires_comptime: RequiresComptime) void {
+ pub fn setRequiresComptime(u: LoadedUnionType, ip: *InternPool, io: Io, requires_comptime: RequiresComptime) void {
assert(requires_comptime != .wip); // see setRequiresComptimeWip
const extra_mutex = &ip.getLocal(u.tid).mutate.extra.mutex;
- extra_mutex.lock();
- defer extra_mutex.unlock();
+ extra_mutex.lockUncancelable(io);
+ defer extra_mutex.unlock(io);
const flags_ptr = u.flagsPtr(ip);
var flags = flags_ptr.*;
@@ -3446,10 +3514,10 @@ pub const LoadedUnionType = struct {
@atomicStore(Tag.TypeUnion.Flags, flags_ptr, flags, .release);
}
- pub fn assumePointerAlignedIfFieldTypesWip(u: LoadedUnionType, ip: *InternPool, ptr_align: Alignment) bool {
+ pub fn assumePointerAlignedIfFieldTypesWip(u: LoadedUnionType, ip: *InternPool, io: Io, ptr_align: Alignment) bool {
const extra_mutex = &ip.getLocal(u.tid).mutate.extra.mutex;
- extra_mutex.lock();
- defer extra_mutex.unlock();
+ extra_mutex.lockUncancelable(io);
+ defer extra_mutex.unlock(io);
const flags_ptr = u.flagsPtr(ip);
var flags = flags_ptr.*;
@@ -3495,10 +3563,10 @@ pub const LoadedUnionType = struct {
return self.flagsUnordered(ip).status.haveLayout();
}
- pub fn setHaveLayout(u: LoadedUnionType, ip: *InternPool, size: u32, padding: u32, alignment: Alignment) void {
+ pub fn setHaveLayout(u: LoadedUnionType, ip: *InternPool, io: Io, size: u32, padding: u32, alignment: Alignment) void {
const extra_mutex = &ip.getLocal(u.tid).mutate.extra.mutex;
- extra_mutex.lock();
- defer extra_mutex.unlock();
+ extra_mutex.lockUncancelable(io);
+ defer extra_mutex.unlock(io);
@atomicStore(u32, u.sizePtr(ip), size, .unordered);
@atomicStore(u32, u.paddingPtr(ip), padding, .unordered);
@@ -3767,10 +3835,10 @@ pub const LoadedStructType = struct {
return s.flagsUnordered(ip).requires_comptime;
}
- pub fn setRequiresComptimeWip(s: LoadedStructType, ip: *InternPool) RequiresComptime {
+ pub fn setRequiresComptimeWip(s: LoadedStructType, ip: *InternPool, io: Io) RequiresComptime {
const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex;
- extra_mutex.lock();
- defer extra_mutex.unlock();
+ extra_mutex.lockUncancelable(io);
+ defer extra_mutex.unlock(io);
const flags_ptr = s.flagsPtr(ip);
var flags = flags_ptr.*;
@@ -3781,12 +3849,12 @@ pub const LoadedStructType = struct {
return flags.requires_comptime;
}
- pub fn setRequiresComptime(s: LoadedStructType, ip: *InternPool, requires_comptime: RequiresComptime) void {
+ pub fn setRequiresComptime(s: LoadedStructType, ip: *InternPool, io: Io, requires_comptime: RequiresComptime) void {
assert(requires_comptime != .wip); // see setRequiresComptimeWip
const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex;
- extra_mutex.lock();
- defer extra_mutex.unlock();
+ extra_mutex.lockUncancelable(io);
+ defer extra_mutex.unlock(io);
const flags_ptr = s.flagsPtr(ip);
var flags = flags_ptr.*;
@@ -3794,12 +3862,12 @@ pub const LoadedStructType = struct {
@atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release);
}
- pub fn assumeRuntimeBitsIfFieldTypesWip(s: LoadedStructType, ip: *InternPool) bool {
+ pub fn assumeRuntimeBitsIfFieldTypesWip(s: LoadedStructType, ip: *InternPool, io: Io) bool {
if (s.layout == .@"packed") return false;
const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex;
- extra_mutex.lock();
- defer extra_mutex.unlock();
+ extra_mutex.lockUncancelable(io);
+ defer extra_mutex.unlock(io);
const flags_ptr = s.flagsPtr(ip);
var flags = flags_ptr.*;
@@ -3810,12 +3878,12 @@ pub const LoadedStructType = struct {
return flags.field_types_wip;
}
- pub fn setFieldTypesWip(s: LoadedStructType, ip: *InternPool) bool {
+ pub fn setFieldTypesWip(s: LoadedStructType, ip: *InternPool, io: Io) bool {
if (s.layout == .@"packed") return false;
const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex;
- extra_mutex.lock();
- defer extra_mutex.unlock();
+ extra_mutex.lockUncancelable(io);
+ defer extra_mutex.unlock(io);
const flags_ptr = s.flagsPtr(ip);
var flags = flags_ptr.*;
@@ -3826,12 +3894,12 @@ pub const LoadedStructType = struct {
return flags.field_types_wip;
}
- pub fn clearFieldTypesWip(s: LoadedStructType, ip: *InternPool) void {
+ pub fn clearFieldTypesWip(s: LoadedStructType, ip: *InternPool, io: Io) void {
if (s.layout == .@"packed") return;
const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex;
- extra_mutex.lock();
- defer extra_mutex.unlock();
+ extra_mutex.lockUncancelable(io);
+ defer extra_mutex.unlock(io);
const flags_ptr = s.flagsPtr(ip);
var flags = flags_ptr.*;
@@ -3839,12 +3907,12 @@ pub const LoadedStructType = struct {
@atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release);
}
- pub fn setLayoutWip(s: LoadedStructType, ip: *InternPool) bool {
+ pub fn setLayoutWip(s: LoadedStructType, ip: *InternPool, io: Io) bool {
if (s.layout == .@"packed") return false;
const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex;
- extra_mutex.lock();
- defer extra_mutex.unlock();
+ extra_mutex.lockUncancelable(io);
+ defer extra_mutex.unlock(io);
const flags_ptr = s.flagsPtr(ip);
var flags = flags_ptr.*;
@@ -3855,12 +3923,12 @@ pub const LoadedStructType = struct {
return flags.layout_wip;
}
- pub fn clearLayoutWip(s: LoadedStructType, ip: *InternPool) void {
+ pub fn clearLayoutWip(s: LoadedStructType, ip: *InternPool, io: Io) void {
if (s.layout == .@"packed") return;
const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex;
- extra_mutex.lock();
- defer extra_mutex.unlock();
+ extra_mutex.lockUncancelable(io);
+ defer extra_mutex.unlock(io);
const flags_ptr = s.flagsPtr(ip);
var flags = flags_ptr.*;
@@ -3868,10 +3936,10 @@ pub const LoadedStructType = struct {
@atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release);
}
- pub fn setAlignment(s: LoadedStructType, ip: *InternPool, alignment: Alignment) void {
+ pub fn setAlignment(s: LoadedStructType, ip: *InternPool, io: Io, alignment: Alignment) void {
const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex;
- extra_mutex.lock();
- defer extra_mutex.unlock();
+ extra_mutex.lockUncancelable(io);
+ defer extra_mutex.unlock(io);
const flags_ptr = s.flagsPtr(ip);
var flags = flags_ptr.*;
@@ -3879,10 +3947,10 @@ pub const LoadedStructType = struct {
@atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release);
}
- pub fn assumePointerAlignedIfFieldTypesWip(s: LoadedStructType, ip: *InternPool, ptr_align: Alignment) bool {
+ pub fn assumePointerAlignedIfFieldTypesWip(s: LoadedStructType, ip: *InternPool, io: Io, ptr_align: Alignment) bool {
const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex;
- extra_mutex.lock();
- defer extra_mutex.unlock();
+ extra_mutex.lockUncancelable(io);
+ defer extra_mutex.unlock(io);
const flags_ptr = s.flagsPtr(ip);
var flags = flags_ptr.*;
@@ -3894,10 +3962,10 @@ pub const LoadedStructType = struct {
return flags.field_types_wip;
}
- pub fn assumePointerAlignedIfWip(s: LoadedStructType, ip: *InternPool, ptr_align: Alignment) bool {
+ pub fn assumePointerAlignedIfWip(s: LoadedStructType, ip: *InternPool, io: Io, ptr_align: Alignment) bool {
const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex;
- extra_mutex.lock();
- defer extra_mutex.unlock();
+ extra_mutex.lockUncancelable(io);
+ defer extra_mutex.unlock(io);
const flags_ptr = s.flagsPtr(ip);
var flags = flags_ptr.*;
@@ -3911,12 +3979,12 @@ pub const LoadedStructType = struct {
return flags.alignment_wip;
}
- pub fn clearAlignmentWip(s: LoadedStructType, ip: *InternPool) void {
+ pub fn clearAlignmentWip(s: LoadedStructType, ip: *InternPool, io: Io) void {
if (s.layout == .@"packed") return;
const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex;
- extra_mutex.lock();
- defer extra_mutex.unlock();
+ extra_mutex.lockUncancelable(io);
+ defer extra_mutex.unlock(io);
const flags_ptr = s.flagsPtr(ip);
var flags = flags_ptr.*;
@@ -3924,10 +3992,10 @@ pub const LoadedStructType = struct {
@atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release);
}
- pub fn setInitsWip(s: LoadedStructType, ip: *InternPool) bool {
+ pub fn setInitsWip(s: LoadedStructType, ip: *InternPool, io: Io) bool {
const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex;
- extra_mutex.lock();
- defer extra_mutex.unlock();
+ extra_mutex.lockUncancelable(io);
+ defer extra_mutex.unlock(io);
switch (s.layout) {
.@"packed" => {
@@ -3951,10 +4019,10 @@ pub const LoadedStructType = struct {
}
}
- pub fn clearInitsWip(s: LoadedStructType, ip: *InternPool) void {
+ pub fn clearInitsWip(s: LoadedStructType, ip: *InternPool, io: Io) void {
const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex;
- extra_mutex.lock();
- defer extra_mutex.unlock();
+ extra_mutex.lockUncancelable(io);
+ defer extra_mutex.unlock(io);
switch (s.layout) {
.@"packed" => {
@@ -3972,12 +4040,12 @@ pub const LoadedStructType = struct {
}
}
- pub fn setFullyResolved(s: LoadedStructType, ip: *InternPool) bool {
+ pub fn setFullyResolved(s: LoadedStructType, ip: *InternPool, io: Io) bool {
if (s.layout == .@"packed") return true;
const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex;
- extra_mutex.lock();
- defer extra_mutex.unlock();
+ extra_mutex.lockUncancelable(io);
+ defer extra_mutex.unlock(io);
const flags_ptr = s.flagsPtr(ip);
var flags = flags_ptr.*;
@@ -3988,10 +4056,10 @@ pub const LoadedStructType = struct {
return flags.fully_resolved;
}
- pub fn clearFullyResolved(s: LoadedStructType, ip: *InternPool) void {
+ pub fn clearFullyResolved(s: LoadedStructType, ip: *InternPool, io: Io) void {
const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex;
- extra_mutex.lock();
- defer extra_mutex.unlock();
+ extra_mutex.lockUncancelable(io);
+ defer extra_mutex.unlock(io);
const flags_ptr = s.flagsPtr(ip);
var flags = flags_ptr.*;
@@ -4027,10 +4095,10 @@ pub const LoadedStructType = struct {
return @atomicLoad(Index, s.backingIntTypePtr(ip), .unordered);
}
- pub fn setBackingIntType(s: LoadedStructType, ip: *InternPool, backing_int_ty: Index) void {
+ pub fn setBackingIntType(s: LoadedStructType, ip: *InternPool, io: Io, backing_int_ty: Index) void {
const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex;
- extra_mutex.lock();
- defer extra_mutex.unlock();
+ extra_mutex.lockUncancelable(io);
+ defer extra_mutex.unlock(io);
@atomicStore(Index, s.backingIntTypePtr(ip), backing_int_ty, .release);
}
@@ -4054,10 +4122,10 @@ pub const LoadedStructType = struct {
};
}
- pub fn setHaveFieldInits(s: LoadedStructType, ip: *InternPool) void {
+ pub fn setHaveFieldInits(s: LoadedStructType, ip: *InternPool, io: Io) void {
const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex;
- extra_mutex.lock();
- defer extra_mutex.unlock();
+ extra_mutex.lockUncancelable(io);
+ defer extra_mutex.unlock(io);
switch (s.layout) {
.@"packed" => {
@@ -4082,10 +4150,10 @@ pub const LoadedStructType = struct {
};
}
- pub fn setLayoutResolved(s: LoadedStructType, ip: *InternPool, size: u32, alignment: Alignment) void {
+ pub fn setLayoutResolved(s: LoadedStructType, ip: *InternPool, io: Io, size: u32, alignment: Alignment) void {
const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex;
- extra_mutex.lock();
- defer extra_mutex.unlock();
+ extra_mutex.lockUncancelable(io);
+ defer extra_mutex.unlock(io);
@atomicStore(u32, s.sizePtr(ip), size, .unordered);
const flags_ptr = s.flagsPtr(ip);
@@ -6826,8 +6894,8 @@ pub const MemoizedCall = struct {
branch_count: u32,
};
-pub fn init(ip: *InternPool, gpa: Allocator, available_threads: usize) !void {
- errdefer ip.deinit(gpa);
+pub fn init(ip: *InternPool, gpa: Allocator, io: Io, available_threads: usize) !void {
+ errdefer ip.deinit(gpa, io);
assert(ip.locals.len == 0 and ip.shards.len == 0);
assert(available_threads > 0 and available_threads <= std.math.maxInt(u8));
@@ -6865,7 +6933,7 @@ pub fn init(ip: *InternPool, gpa: Allocator, available_threads: usize) !void {
.namespaces = .empty,
},
});
- for (ip.locals) |*local| try local.getMutableStrings(gpa).append(.{0});
+ for (ip.locals) |*local| try local.getMutableStrings(gpa, io).append(.{0});
ip.tid_width = @intCast(std.math.log2_int_ceil(usize, used_threads));
ip.tid_shift_30 = if (single_threaded) 0 else 30 - ip.tid_width;
@@ -6874,28 +6942,28 @@ pub fn init(ip: *InternPool, gpa: Allocator, available_threads: usize) !void {
ip.shards = try gpa.alloc(Shard, @as(usize, 1) << ip.tid_width);
@memset(ip.shards, .{
.shared = .{
- .map = Shard.Map(Index).empty,
- .string_map = Shard.Map(OptionalNullTerminatedString).empty,
- .tracked_inst_map = Shard.Map(TrackedInst.Index.Optional).empty,
+ .map = .empty,
+ .string_map = .empty,
+ .tracked_inst_map = .empty,
},
.mutate = .{
- .map = Shard.Mutate.empty,
- .string_map = Shard.Mutate.empty,
- .tracked_inst_map = Shard.Mutate.empty,
+ .map = .empty,
+ .string_map = .empty,
+ .tracked_inst_map = .empty,
},
});
// Reserve string index 0 for an empty string.
- assert((try ip.getOrPutString(gpa, .main, "", .no_embedded_nulls)) == .empty);
+ assert((try ip.getOrPutString(gpa, io, .main, "", .no_embedded_nulls)) == .empty);
// This inserts all the statically-known values into the intern pool in the
// order expected.
for (&static_keys, 0..) |key, key_index| switch (@as(Index, @enumFromInt(key_index))) {
- .empty_tuple_type => assert(try ip.getTupleType(gpa, .main, .{
+ .empty_tuple_type => assert(try ip.getTupleType(gpa, io, .main, .{
.types = &.{},
.values = &.{},
}) == .empty_tuple_type),
- else => |expected_index| assert(try ip.get(gpa, .main, key) == expected_index),
+ else => |expected_index| assert(try ip.get(gpa, io, .main, key) == expected_index),
};
if (std.debug.runtime_safety) {
@@ -6905,7 +6973,7 @@ pub fn init(ip: *InternPool, gpa: Allocator, available_threads: usize) !void {
}
}
-pub fn deinit(ip: *InternPool, gpa: Allocator) void {
+pub fn deinit(ip: *InternPool, gpa: Allocator, io: Io) void {
if (debug_state.enable_checks) std.debug.assert(debug_state.intern_pool == null);
ip.src_hash_deps.deinit(gpa);
@@ -6940,7 +7008,7 @@ pub fn deinit(ip: *InternPool, gpa: Allocator) void {
namespace.test_decls.deinit(gpa);
}
};
- const maps = local.getMutableMaps(gpa);
+ const maps = local.getMutableMaps(gpa, io);
if (maps.mutate.len > 0) for (maps.view().items(.@"0")) |*map| map.deinit(gpa);
local.mutate.arena.promote(gpa).deinit();
}
@@ -7645,6 +7713,7 @@ const GetOrPutKey = union(enum) {
new: struct {
ip: *InternPool,
tid: Zcu.PerThread.Id,
+ io: Io,
shard: *Shard,
map_index: u32,
},
@@ -7679,7 +7748,7 @@ const GetOrPutKey = union(enum) {
.new => |info| {
assert(info.shard.shared.map.entries[info.map_index].value == index);
info.shard.mutate.map.len += 1;
- info.shard.mutate.map.mutex.unlock();
+ info.shard.mutate.map.mutex.unlock(info.io);
gop.* = .{ .existing = index };
},
}
@@ -7688,7 +7757,7 @@ const GetOrPutKey = union(enum) {
fn cancel(gop: *GetOrPutKey) void {
switch (gop.*) {
.existing => {},
- .new => |info| info.shard.mutate.map.mutex.unlock(),
+ .new => |info| info.shard.mutate.map.mutex.unlock(info.io),
}
gop.* = .{ .existing = undefined };
}
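
`cancel` exists because interning one key sometimes requires interning a
dependency first (a slice pointer type needs its many-pointer form, for
example), and holding the shard lock across that nested `get` would be unsound.
The call sites in `get` below all follow the same shape, sketched here with an
invented `addItemReferencing` helper standing in for the real item construction:

    fn getWithDependency(
        ip: *InternPool,
        gpa: Allocator,
        io: Io,
        tid: Zcu.PerThread.Id,
        key: Key,
        dep_key: Key,
    ) Allocator.Error!Index {
        var gop = try ip.getOrPutKey(gpa, io, tid, key);
        defer gop.deinit();
        if (gop == .existing) return gop.existing;

        gop.cancel(); // release the shard lock before recursing
        const dep = try ip.get(gpa, io, tid, dep_key);
        gop = try ip.getOrPutKey(gpa, io, tid, key); // re-acquire and re-check
        if (gop == .existing) return gop.existing;

        return ip.addItemReferencing(&gop, tid, dep); // hypothetical
    }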
@@ -7705,14 +7774,16 @@ const GetOrPutKey = union(enum) {
fn getOrPutKey(
ip: *InternPool,
gpa: Allocator,
+ io: Io,
tid: Zcu.PerThread.Id,
key: Key,
) Allocator.Error!GetOrPutKey {
- return ip.getOrPutKeyEnsuringAdditionalCapacity(gpa, tid, key, 0);
+ return ip.getOrPutKeyEnsuringAdditionalCapacity(gpa, io, tid, key, 0);
}
fn getOrPutKeyEnsuringAdditionalCapacity(
ip: *InternPool,
gpa: Allocator,
+ io: Io,
tid: Zcu.PerThread.Id,
key: Key,
additional_capacity: u32,
@@ -7733,8 +7804,8 @@ fn getOrPutKeyEnsuringAdditionalCapacity(
if (index.unwrap(ip).getTag(ip) == .removed) continue;
if (ip.indexToKey(index).eql(key, ip)) return .{ .existing = index };
}
- shard.mutate.map.mutex.lock();
- errdefer shard.mutate.map.mutex.unlock();
+ shard.mutate.map.mutex.lock(io, tid);
+ errdefer shard.mutate.map.mutex.unlock(io);
if (map.entries != shard.shared.map.entries) {
map = shard.shared.map;
map_mask = map.header().mask();
@@ -7747,7 +7818,7 @@ fn getOrPutKeyEnsuringAdditionalCapacity(
if (index == .none) break;
if (entry.hash != hash) continue;
if (ip.indexToKey(index).eql(key, ip)) {
- defer shard.mutate.map.mutex.unlock();
+ defer shard.mutate.map.mutex.unlock(io);
return .{ .existing = index };
}
}
@@ -7801,6 +7872,7 @@ fn getOrPutKeyEnsuringAdditionalCapacity(
return .{ .new = .{
.ip = ip,
.tid = tid,
+ .io = io,
.shard = shard,
.map_index = map_index,
} };
@@ -7815,14 +7887,15 @@ fn getOrPutKeyEnsuringAdditionalCapacity(
/// will be cleaned up when the `Zcu` undergoes garbage collection.
fn putKeyReplace(
ip: *InternPool,
+ io: Io,
tid: Zcu.PerThread.Id,
key: Key,
) GetOrPutKey {
const full_hash = key.hash64(ip);
const hash: u32 = @truncate(full_hash >> 32);
const shard = &ip.shards[@intCast(full_hash & (ip.shards.len - 1))];
- shard.mutate.map.mutex.lock();
- errdefer shard.mutate.map.mutex.unlock();
+ shard.mutate.map.mutex.lock(io, tid);
+ errdefer shard.mutate.map.mutex.unlock(io);
const map = shard.shared.map;
const map_mask = map.header().mask();
var map_index = hash;
@@ -7838,18 +7911,19 @@ fn putKeyReplace(
return .{ .new = .{
.ip = ip,
.tid = tid,
+ .io = io,
.shard = shard,
.map_index = map_index,
} };
}
-pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) Allocator.Error!Index {
- var gop = try ip.getOrPutKey(gpa, tid, key);
+pub fn get(ip: *InternPool, gpa: Allocator, io: Io, tid: Zcu.PerThread.Id, key: Key) Allocator.Error!Index {
+ var gop = try ip.getOrPutKey(gpa, io, tid, key);
defer gop.deinit();
if (gop == .existing) return gop.existing;
const local = ip.getLocal(tid);
- const items = local.getMutableItems(gpa);
- const extra = local.getMutableExtra(gpa);
+ const items = local.getMutableItems(gpa, io);
+ const extra = local.getMutableExtra(gpa, io);
try items.ensureUnusedCapacity(1);
switch (key) {
.int_type => |int_type| {
@@ -7870,8 +7944,8 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All
gop.cancel();
var new_key = key;
new_key.ptr_type.flags.size = .many;
- const ptr_type_index = try ip.get(gpa, tid, new_key);
- gop = try ip.getOrPutKey(gpa, tid, key);
+ const ptr_type_index = try ip.get(gpa, io, tid, new_key);
+ gop = try ip.getOrPutKey(gpa, io, tid, key);
try items.ensureUnusedCapacity(1);
items.appendAssumeCapacity(.{
@@ -7953,7 +8027,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All
assert(error_set_type.names_map == .none);
assert(std.sort.isSorted(NullTerminatedString, error_set_type.names.get(ip), {}, NullTerminatedString.indexLessThan));
const names = error_set_type.names.get(ip);
- const names_map = try ip.addMap(gpa, tid, names.len);
+ const names_map = try ip.addMap(gpa, io, tid, names.len);
ip.addStringsToMap(names_map, names);
const names_len = error_set_type.names.len;
try extra.ensureUnusedCapacity(@typeInfo(Tag.ErrorSet).@"struct".fields.len + names_len);
@@ -8051,7 +8125,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All
gop.cancel();
var new_key = key;
new_key.ptr.base_addr.uav.orig_ty = ptr.ty;
- gop = try ip.getOrPutKey(gpa, tid, new_key);
+ gop = try ip.getOrPutKey(gpa, io, tid, new_key);
if (gop == .existing) return gop.existing;
}
break :item .{
@@ -8123,11 +8197,11 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All
else => unreachable,
}
gop.cancel();
- const index_index = try ip.get(gpa, tid, .{ .int = .{
+ const index_index = try ip.get(gpa, io, tid, .{ .int = .{
.ty = .usize_type,
.storage = .{ .u64 = base_index.index },
} });
- gop = try ip.getOrPutKey(gpa, tid, key);
+ gop = try ip.getOrPutKey(gpa, io, tid, key);
try items.ensureUnusedCapacity(1);
items.appendAssumeCapacity(.{
.tag = switch (ptr.base_addr) {
@@ -8318,7 +8392,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All
} else |_| {}
const tag: Tag = if (big_int.positive) .int_positive else .int_negative;
- try addInt(ip, gpa, tid, int.ty, tag, big_int.limbs);
+ try addInt(ip, gpa, io, tid, int.ty, tag, big_int.limbs);
},
inline .u64, .i64 => |x| {
if (std.math.cast(u32, x)) |casted| {
@@ -8335,7 +8409,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All
var buf: [2]Limb = undefined;
const big_int = BigIntMutable.init(&buf, x).toConst();
const tag: Tag = if (big_int.positive) .int_positive else .int_negative;
- try addInt(ip, gpa, tid, int.ty, tag, big_int.limbs);
+ try addInt(ip, gpa, io, tid, int.ty, tag, big_int.limbs);
},
.lazy_align, .lazy_size => unreachable,
}
@@ -8546,11 +8620,11 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All
const elem = switch (aggregate.storage) {
.bytes => |bytes| elem: {
gop.cancel();
- const elem = try ip.get(gpa, tid, .{ .int = .{
+ const elem = try ip.get(gpa, io, tid, .{ .int = .{
.ty = .u8_type,
.storage = .{ .u64 = bytes.at(0, ip) },
} });
- gop = try ip.getOrPutKey(gpa, tid, key);
+ gop = try ip.getOrPutKey(gpa, io, tid, key);
try items.ensureUnusedCapacity(1);
break :elem elem;
},
@@ -8570,7 +8644,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All
}
if (child == .u8_type) bytes: {
- const string_bytes = ip.getLocal(tid).getMutableStringBytes(gpa);
+ const string_bytes = ip.getLocal(tid).getMutableStringBytes(gpa, io);
const start = string_bytes.mutate.len;
try string_bytes.ensureUnusedCapacity(@intCast(len_including_sentinel + 1));
try extra.ensureUnusedCapacity(@typeInfo(Bytes).@"struct".fields.len);
@@ -8598,6 +8672,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All
});
const string = try ip.getOrPutTrailingString(
gpa,
+ io,
tid,
@intCast(len_including_sentinel),
.maybe_embedded_nulls,
@@ -8647,15 +8722,16 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All
pub fn getUnion(
ip: *InternPool,
gpa: Allocator,
+ io: Io,
tid: Zcu.PerThread.Id,
un: Key.Union,
) Allocator.Error!Index {
- var gop = try ip.getOrPutKey(gpa, tid, .{ .un = un });
+ var gop = try ip.getOrPutKey(gpa, io, tid, .{ .un = un });
defer gop.deinit();
if (gop == .existing) return gop.existing;
const local = ip.getLocal(tid);
- const items = local.getMutableItems(gpa);
- const extra = local.getMutableExtra(gpa);
+ const items = local.getMutableItems(gpa, io);
+ const extra = local.getMutableExtra(gpa, io);
try items.ensureUnusedCapacity(1);
assert(un.ty != .none);
@@ -8706,6 +8782,7 @@ pub const UnionTypeInit = struct {
pub fn getUnionType(
ip: *InternPool,
gpa: Allocator,
+ io: Io,
tid: Zcu.PerThread.Id,
ini: UnionTypeInit,
/// If it is known that there is an existing type with this key which is outdated,
@@ -8727,16 +8804,16 @@ pub fn getUnionType(
} },
} };
var gop = if (replace_existing)
- ip.putKeyReplace(tid, key)
+ ip.putKeyReplace(io, tid, key)
else
- try ip.getOrPutKey(gpa, tid, key);
+ try ip.getOrPutKey(gpa, io, tid, key);
defer gop.deinit();
if (gop == .existing) return .{ .existing = gop.existing };
const local = ip.getLocal(tid);
- const items = local.getMutableItems(gpa);
+ const items = local.getMutableItems(gpa, io);
try items.ensureUnusedCapacity(1);
- const extra = local.getMutableExtra(gpa);
+ const extra = local.getMutableExtra(gpa, io);
const align_elements_len = if (ini.flags.any_aligned_fields) (ini.fields_len + 3) / 4 else 0;
const align_element: u32 = @bitCast([1]u8{@intFromEnum(Alignment.none)} ** 4);
@@ -8903,6 +8980,7 @@ pub const StructTypeInit = struct {
pub fn getStructType(
ip: *InternPool,
gpa: Allocator,
+ io: Io,
tid: Zcu.PerThread.Id,
ini: StructTypeInit,
/// If it is known that there is an existing type with this key which is outdated,
@@ -8924,17 +9002,17 @@ pub fn getStructType(
} },
} };
var gop = if (replace_existing)
- ip.putKeyReplace(tid, key)
+ ip.putKeyReplace(io, tid, key)
else
- try ip.getOrPutKey(gpa, tid, key);
+ try ip.getOrPutKey(gpa, io, tid, key);
defer gop.deinit();
if (gop == .existing) return .{ .existing = gop.existing };
const local = ip.getLocal(tid);
- const items = local.getMutableItems(gpa);
- const extra = local.getMutableExtra(gpa);
+ const items = local.getMutableItems(gpa, io);
+ const extra = local.getMutableExtra(gpa, io);
- const names_map = try ip.addMap(gpa, tid, ini.fields_len);
+ const names_map = try ip.addMap(gpa, io, tid, ini.fields_len);
errdefer local.mutate.maps.len -= 1;
const zir_index = switch (ini.key) {
@@ -9109,6 +9187,7 @@ pub const TupleTypeInit = struct {
pub fn getTupleType(
ip: *InternPool,
gpa: Allocator,
+ io: Io,
tid: Zcu.PerThread.Id,
ini: TupleTypeInit,
) Allocator.Error!Index {
@@ -9116,8 +9195,8 @@ pub fn getTupleType(
for (ini.types) |elem| assert(elem != .none);
const local = ip.getLocal(tid);
- const items = local.getMutableItems(gpa);
- const extra = local.getMutableExtra(gpa);
+ const items = local.getMutableItems(gpa, io);
+ const extra = local.getMutableExtra(gpa, io);
const prev_extra_len = extra.mutate.len;
const fields_len: u32 = @intCast(ini.types.len);
@@ -9134,7 +9213,7 @@ pub fn getTupleType(
extra.appendSliceAssumeCapacity(.{@ptrCast(ini.values)});
errdefer extra.mutate.len = prev_extra_len;
- var gop = try ip.getOrPutKey(gpa, tid, .{ .tuple_type = extraTypeTuple(tid, extra.list.*, extra_index) });
+ var gop = try ip.getOrPutKey(gpa, io, tid, .{ .tuple_type = extraTypeTuple(tid, extra.list.*, extra_index) });
defer gop.deinit();
if (gop == .existing) {
extra.mutate.len = prev_extra_len;
@@ -9166,6 +9245,7 @@ pub const GetFuncTypeKey = struct {
pub fn getFuncType(
ip: *InternPool,
gpa: Allocator,
+ io: Io,
tid: Zcu.PerThread.Id,
key: GetFuncTypeKey,
) Allocator.Error!Index {
@@ -9174,9 +9254,9 @@ pub fn getFuncType(
for (key.param_types) |param_type| assert(param_type != .none);
const local = ip.getLocal(tid);
- const items = local.getMutableItems(gpa);
+ const items = local.getMutableItems(gpa, io);
try items.ensureUnusedCapacity(1);
- const extra = local.getMutableExtra(gpa);
+ const extra = local.getMutableExtra(gpa, io);
// The strategy here is to add the function type unconditionally, then to
// ask if it already exists, and if so, revert the lengths of the mutated
@@ -9207,7 +9287,7 @@ pub fn getFuncType(
extra.appendSliceAssumeCapacity(.{@ptrCast(key.param_types)});
errdefer extra.mutate.len = prev_extra_len;
- var gop = try ip.getOrPutKey(gpa, tid, .{
+ var gop = try ip.getOrPutKey(gpa, io, tid, .{
.func_type = extraFuncType(tid, extra.list.*, func_type_extra_index),
});
defer gop.deinit();
@@ -9228,6 +9308,7 @@ pub fn getFuncType(
pub fn getExtern(
ip: *InternPool,
gpa: Allocator,
+ io: Io,
tid: Zcu.PerThread.Id,
/// `key.owner_nav` is ignored.
key: Key.Extern,
@@ -9236,7 +9317,7 @@ pub fn getExtern(
/// Only set if the `Nav` was newly created.
new_nav: Nav.Index.Optional,
} {
- var gop = try ip.getOrPutKey(gpa, tid, .{ .@"extern" = key });
+ var gop = try ip.getOrPutKey(gpa, io, tid, .{ .@"extern" = key });
defer gop.deinit();
if (gop == .existing) return .{
.index = gop.existing,
@@ -9244,18 +9325,18 @@ pub fn getExtern(
};
const local = ip.getLocal(tid);
- const items = local.getMutableItems(gpa);
- const extra = local.getMutableExtra(gpa);
+ const items = local.getMutableItems(gpa, io);
+ const extra = local.getMutableExtra(gpa, io);
try items.ensureUnusedCapacity(1);
try extra.ensureUnusedCapacity(@typeInfo(Tag.Extern).@"struct".fields.len);
- try local.getMutableNavs(gpa).ensureUnusedCapacity(1);
+ try local.getMutableNavs(gpa, io).ensureUnusedCapacity(1);
    // Predict the index the `@"extern"` will live at, so we can construct the owner `Nav` before releasing the shard's mutex.
const extern_index = Index.Unwrapped.wrap(.{
.tid = tid,
.index = items.mutate.len,
}, ip);
- const owner_nav = ip.createNav(gpa, tid, .{
+ const owner_nav = ip.createNav(gpa, io, tid, .{
.name = key.name,
.fqn = key.name,
.val = extern_index,
@@ -9305,13 +9386,14 @@ pub const GetFuncDeclKey = struct {
pub fn getFuncDecl(
ip: *InternPool,
gpa: Allocator,
+ io: Io,
tid: Zcu.PerThread.Id,
key: GetFuncDeclKey,
) Allocator.Error!Index {
const local = ip.getLocal(tid);
- const items = local.getMutableItems(gpa);
+ const items = local.getMutableItems(gpa, io);
try items.ensureUnusedCapacity(1);
- const extra = local.getMutableExtra(gpa);
+ const extra = local.getMutableExtra(gpa, io);
// The strategy here is to add the function type unconditionally, then to
// ask if it already exists, and if so, revert the lengths of the mutated
@@ -9340,7 +9422,7 @@ pub fn getFuncDecl(
});
errdefer extra.mutate.len = prev_extra_len;
- var gop = try ip.getOrPutKey(gpa, tid, .{
+ var gop = try ip.getOrPutKey(gpa, io, tid, .{
.func = extraFuncDecl(tid, extra.list.*, func_decl_extra_index),
});
defer gop.deinit();
@@ -9387,6 +9469,7 @@ pub const GetFuncDeclIesKey = struct {
pub fn getFuncDeclIes(
ip: *InternPool,
gpa: Allocator,
+ io: Io,
tid: Zcu.PerThread.Id,
key: GetFuncDeclIesKey,
) Allocator.Error!Index {
@@ -9395,9 +9478,9 @@ pub fn getFuncDeclIes(
for (key.param_types) |param_type| assert(param_type != .none);
const local = ip.getLocal(tid);
- const items = local.getMutableItems(gpa);
+ const items = local.getMutableItems(gpa, io);
try items.ensureUnusedCapacity(4);
- const extra = local.getMutableExtra(gpa);
+ const extra = local.getMutableExtra(gpa, io);
// The strategy here is to add the function decl unconditionally, then to
// ask if it already exists, and if so, revert the lengths of the mutated
@@ -9488,7 +9571,7 @@ pub fn getFuncDeclIes(
extra.mutate.len = prev_extra_len;
}
- var func_gop = try ip.getOrPutKeyEnsuringAdditionalCapacity(gpa, tid, .{
+ var func_gop = try ip.getOrPutKeyEnsuringAdditionalCapacity(gpa, io, tid, .{
.func = extraFuncDecl(tid, extra.list.*, func_decl_extra_index),
}, 3);
defer func_gop.deinit();
@@ -9509,18 +9592,18 @@ pub fn getFuncDeclIes(
return func_gop.existing;
}
func_gop.putTentative(func_index);
- var error_union_type_gop = try ip.getOrPutKeyEnsuringAdditionalCapacity(gpa, tid, .{ .error_union_type = .{
+ var error_union_type_gop = try ip.getOrPutKeyEnsuringAdditionalCapacity(gpa, io, tid, .{ .error_union_type = .{
.error_set_type = error_set_type,
.payload_type = key.bare_return_type,
} }, 2);
defer error_union_type_gop.deinit();
error_union_type_gop.putTentative(error_union_type);
- var error_set_type_gop = try ip.getOrPutKeyEnsuringAdditionalCapacity(gpa, tid, .{
+ var error_set_type_gop = try ip.getOrPutKeyEnsuringAdditionalCapacity(gpa, io, tid, .{
.inferred_error_set_type = func_index,
}, 1);
defer error_set_type_gop.deinit();
error_set_type_gop.putTentative(error_set_type);
- var func_ty_gop = try ip.getOrPutKey(gpa, tid, .{
+ var func_ty_gop = try ip.getOrPutKey(gpa, io, tid, .{
.func_type = extraFuncType(tid, extra.list.*, func_type_extra_index),
});
defer func_ty_gop.deinit();
@@ -9536,17 +9619,18 @@ pub fn getFuncDeclIes(
pub fn getErrorSetType(
ip: *InternPool,
gpa: Allocator,
+ io: Io,
tid: Zcu.PerThread.Id,
names: []const NullTerminatedString,
) Allocator.Error!Index {
assert(std.sort.isSorted(NullTerminatedString, names, {}, NullTerminatedString.indexLessThan));
const local = ip.getLocal(tid);
- const items = local.getMutableItems(gpa);
- const extra = local.getMutableExtra(gpa);
+ const items = local.getMutableItems(gpa, io);
+ const extra = local.getMutableExtra(gpa, io);
try extra.ensureUnusedCapacity(@typeInfo(Tag.ErrorSet).@"struct".fields.len + names.len);
- const names_map = try ip.addMap(gpa, tid, names.len);
+ const names_map = try ip.addMap(gpa, io, tid, names.len);
errdefer local.mutate.maps.len -= 1;
// The strategy here is to add the type unconditionally, then to ask if it
@@ -9562,7 +9646,7 @@ pub fn getErrorSetType(
extra.appendSliceAssumeCapacity(.{@ptrCast(names)});
errdefer extra.mutate.len = prev_extra_len;
- var gop = try ip.getOrPutKey(gpa, tid, .{
+ var gop = try ip.getOrPutKey(gpa, io, tid, .{
.error_set_type = extraErrorSet(tid, extra.list.*, error_set_extra_index),
});
defer gop.deinit();
@@ -9599,16 +9683,17 @@ pub const GetFuncInstanceKey = struct {
pub fn getFuncInstance(
ip: *InternPool,
gpa: Allocator,
+ io: Io,
tid: Zcu.PerThread.Id,
arg: GetFuncInstanceKey,
) Allocator.Error!Index {
if (arg.inferred_error_set)
- return getFuncInstanceIes(ip, gpa, tid, arg);
+ return getFuncInstanceIes(ip, gpa, io, tid, arg);
const generic_owner = unwrapCoercedFunc(ip, arg.generic_owner);
const generic_owner_ty = ip.indexToKey(ip.funcDeclInfo(generic_owner).ty).func_type;
- const func_ty = try ip.getFuncType(gpa, tid, .{
+ const func_ty = try ip.getFuncType(gpa, io, tid, .{
.param_types = arg.param_types,
.return_type = arg.bare_return_type,
.noalias_bits = arg.noalias_bits,
@@ -9617,8 +9702,8 @@ pub fn getFuncInstance(
});
const local = ip.getLocal(tid);
- const items = local.getMutableItems(gpa);
- const extra = local.getMutableExtra(gpa);
+ const items = local.getMutableItems(gpa, io);
+ const extra = local.getMutableExtra(gpa, io);
try extra.ensureUnusedCapacity(@typeInfo(Tag.FuncInstance).@"struct".fields.len +
arg.comptime_args.len);
@@ -9646,7 +9731,7 @@ pub fn getFuncInstance(
});
extra.appendSliceAssumeCapacity(.{@ptrCast(arg.comptime_args)});
- var gop = try ip.getOrPutKey(gpa, tid, .{
+ var gop = try ip.getOrPutKey(gpa, io, tid, .{
.func = ip.extraFuncInstance(tid, extra.list.*, func_extra_index),
});
defer gop.deinit();
@@ -9664,6 +9749,7 @@ pub fn getFuncInstance(
try finishFuncInstance(
ip,
gpa,
+ io,
tid,
extra,
generic_owner,
@@ -9676,9 +9762,10 @@ pub fn getFuncInstance(
/// This function exists separately from `getFuncInstance` because it needs to
/// create 4 new items in the InternPool atomically before it can look for an
/// existing item in the map.
-pub fn getFuncInstanceIes(
+fn getFuncInstanceIes(
ip: *InternPool,
gpa: Allocator,
+ io: Io,
tid: Zcu.PerThread.Id,
arg: GetFuncInstanceKey,
) Allocator.Error!Index {
@@ -9688,8 +9775,8 @@ pub fn getFuncInstanceIes(
for (arg.param_types) |param_type| assert(param_type != .none);
const local = ip.getLocal(tid);
- const items = local.getMutableItems(gpa);
- const extra = local.getMutableExtra(gpa);
+ const items = local.getMutableItems(gpa, io);
+ const extra = local.getMutableExtra(gpa, io);
try items.ensureUnusedCapacity(4);
const generic_owner = unwrapCoercedFunc(ip, arg.generic_owner);
@@ -9784,7 +9871,7 @@ pub fn getFuncInstanceIes(
extra.mutate.len = prev_extra_len;
}
- var func_gop = try ip.getOrPutKeyEnsuringAdditionalCapacity(gpa, tid, .{
+ var func_gop = try ip.getOrPutKeyEnsuringAdditionalCapacity(gpa, io, tid, .{
.func = ip.extraFuncInstance(tid, extra.list.*, func_extra_index),
}, 3);
defer func_gop.deinit();
@@ -9795,18 +9882,18 @@ pub fn getFuncInstanceIes(
return func_gop.existing;
}
func_gop.putTentative(func_index);
- var error_union_type_gop = try ip.getOrPutKeyEnsuringAdditionalCapacity(gpa, tid, .{ .error_union_type = .{
+ var error_union_type_gop = try ip.getOrPutKeyEnsuringAdditionalCapacity(gpa, io, tid, .{ .error_union_type = .{
.error_set_type = error_set_type,
.payload_type = arg.bare_return_type,
} }, 2);
defer error_union_type_gop.deinit();
error_union_type_gop.putTentative(error_union_type);
- var error_set_type_gop = try ip.getOrPutKeyEnsuringAdditionalCapacity(gpa, tid, .{
+ var error_set_type_gop = try ip.getOrPutKeyEnsuringAdditionalCapacity(gpa, io, tid, .{
.inferred_error_set_type = func_index,
}, 1);
defer error_set_type_gop.deinit();
error_set_type_gop.putTentative(error_set_type);
- var func_ty_gop = try ip.getOrPutKey(gpa, tid, .{
+ var func_ty_gop = try ip.getOrPutKey(gpa, io, tid, .{
.func_type = extraFuncType(tid, extra.list.*, func_type_extra_index),
});
defer func_ty_gop.deinit();
@@ -9814,6 +9901,7 @@ pub fn getFuncInstanceIes(
try finishFuncInstance(
ip,
gpa,
+ io,
tid,
extra,
generic_owner,
@@ -9831,6 +9919,7 @@ pub fn getFuncInstanceIes(
fn finishFuncInstance(
ip: *InternPool,
gpa: Allocator,
+ io: Io,
tid: Zcu.PerThread.Id,
extra: Local.Extra.Mutable,
generic_owner: Index,
@@ -9841,12 +9930,12 @@ fn finishFuncInstance(
const fn_namespace = fn_owner_nav.analysis.?.namespace;
// TODO: improve this name
- const nav_name = try ip.getOrPutStringFmt(gpa, tid, "{f}__anon_{d}", .{
+ const nav_name = try ip.getOrPutStringFmt(gpa, io, tid, "{f}__anon_{d}", .{
fn_owner_nav.name.fmt(ip), @intFromEnum(func_index),
}, .no_embedded_nulls);
- const nav_index = try ip.createNav(gpa, tid, .{
+ const nav_index = try ip.createNav(gpa, io, tid, .{
.name = nav_name,
- .fqn = try ip.namespacePtr(fn_namespace).internFullyQualifiedName(ip, gpa, tid, nav_name),
+ .fqn = try ip.namespacePtr(fn_namespace).internFullyQualifiedName(ip, gpa, io, tid, nav_name),
.val = func_index,
.is_const = fn_owner_nav.status.fully_resolved.is_const,
.alignment = fn_owner_nav.status.fully_resolved.alignment,
@@ -9967,6 +10056,7 @@ pub const WipEnumType = struct {
pub fn getEnumType(
ip: *InternPool,
gpa: Allocator,
+ io: Io,
tid: Zcu.PerThread.Id,
ini: EnumTypeInit,
/// If it is known that there is an existing type with this key which is outdated,
@@ -9988,18 +10078,18 @@ pub fn getEnumType(
} },
} };
var gop = if (replace_existing)
- ip.putKeyReplace(tid, key)
+ ip.putKeyReplace(io, tid, key)
else
- try ip.getOrPutKey(gpa, tid, key);
+ try ip.getOrPutKey(gpa, io, tid, key);
defer gop.deinit();
if (gop == .existing) return .{ .existing = gop.existing };
const local = ip.getLocal(tid);
- const items = local.getMutableItems(gpa);
+ const items = local.getMutableItems(gpa, io);
try items.ensureUnusedCapacity(1);
- const extra = local.getMutableExtra(gpa);
+ const extra = local.getMutableExtra(gpa, io);
- const names_map = try ip.addMap(gpa, tid, ini.fields_len);
+ const names_map = try ip.addMap(gpa, io, tid, ini.fields_len);
errdefer local.mutate.maps.len -= 1;
switch (ini.tag_mode) {
@@ -10056,7 +10146,7 @@ pub fn getEnumType(
},
.explicit, .nonexhaustive => {
const values_map: OptionalMapIndex = if (!ini.has_values) .none else m: {
- const values_map = try ip.addMap(gpa, tid, ini.fields_len);
+ const values_map = try ip.addMap(gpa, io, tid, ini.fields_len);
break :m values_map.toOptional();
};
errdefer if (ini.has_values) {
@@ -10141,6 +10231,7 @@ const GeneratedTagEnumTypeInit = struct {
pub fn getGeneratedTagEnumType(
ip: *InternPool,
gpa: Allocator,
+ io: Io,
tid: Zcu.PerThread.Id,
ini: GeneratedTagEnumTypeInit,
) Allocator.Error!Index {
@@ -10149,11 +10240,11 @@ pub fn getGeneratedTagEnumType(
for (ini.values) |val| assert(ip.typeOf(val) == ini.tag_ty);
const local = ip.getLocal(tid);
- const items = local.getMutableItems(gpa);
+ const items = local.getMutableItems(gpa, io);
try items.ensureUnusedCapacity(1);
- const extra = local.getMutableExtra(gpa);
+ const extra = local.getMutableExtra(gpa, io);
- const names_map = try ip.addMap(gpa, tid, ini.names.len);
+ const names_map = try ip.addMap(gpa, io, tid, ini.names.len);
errdefer local.mutate.maps.len -= 1;
ip.addStringsToMap(names_map, ini.names);
@@ -10165,7 +10256,7 @@ pub fn getGeneratedTagEnumType(
.index = items.mutate.len,
}, ip);
const parent_namespace = ip.namespacePtr(ini.parent_namespace);
- const namespace = try ip.createNamespace(gpa, tid, .{
+ const namespace = try ip.createNamespace(gpa, io, tid, .{
.parent = ini.parent_namespace.toOptional(),
.owner_type = enum_index,
.file_scope = parent_namespace.file_scope,
@@ -10202,7 +10293,7 @@ pub fn getGeneratedTagEnumType(
ini.values.len); // field values
const values_map: OptionalMapIndex = if (ini.values.len != 0) m: {
- const map = try ip.addMap(gpa, tid, ini.values.len);
+ const map = try ip.addMap(gpa, io, tid, ini.values.len);
ip.addIndexesToMap(map, ini.values);
break :m map.toOptional();
} else .none;
@@ -10240,7 +10331,7 @@ pub fn getGeneratedTagEnumType(
},
};
- var gop = try ip.getOrPutKey(gpa, tid, .{ .enum_type = .{
+ var gop = try ip.getOrPutKey(gpa, io, tid, .{ .enum_type = .{
.generated_tag = .{ .union_type = ini.owner_union_ty },
} });
defer gop.deinit();
@@ -10256,10 +10347,11 @@ pub const OpaqueTypeInit = struct {
pub fn getOpaqueType(
ip: *InternPool,
gpa: Allocator,
+ io: Io,
tid: Zcu.PerThread.Id,
ini: OpaqueTypeInit,
) Allocator.Error!WipNamespaceType.Result {
- var gop = try ip.getOrPutKey(gpa, tid, .{ .opaque_type = .{ .declared = .{
+ var gop = try ip.getOrPutKey(gpa, io, tid, .{ .opaque_type = .{ .declared = .{
.zir_index = ini.zir_index,
.captures = .{ .external = ini.captures },
} } });
@@ -10267,8 +10359,8 @@ pub fn getOpaqueType(
if (gop == .existing) return .{ .existing = gop.existing };
const local = ip.getLocal(tid);
- const items = local.getMutableItems(gpa);
- const extra = local.getMutableExtra(gpa);
+ const items = local.getMutableItems(gpa, io);
+ const extra = local.getMutableExtra(gpa, io);
try items.ensureUnusedCapacity(1);
try extra.ensureUnusedCapacity(@typeInfo(Tag.TypeOpaque).@"struct".fields.len + ini.captures.len);
@@ -10338,8 +10430,8 @@ fn addIndexesToMap(
}
}
-fn addMap(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, cap: usize) Allocator.Error!MapIndex {
- const maps = ip.getLocal(tid).getMutableMaps(gpa);
+fn addMap(ip: *InternPool, gpa: Allocator, io: Io, tid: Zcu.PerThread.Id, cap: usize) Allocator.Error!MapIndex {
+ const maps = ip.getLocal(tid).getMutableMaps(gpa, io);
const unwrapped: MapIndex.Unwrapped = .{ .tid = tid, .index = maps.mutate.len };
const ptr = try maps.addOne();
errdefer maps.mutate.len = unwrapped.index;
@@ -10373,14 +10465,15 @@ pub fn remove(ip: *InternPool, tid: Zcu.PerThread.Id, index: Index) void {
fn addInt(
ip: *InternPool,
gpa: Allocator,
+ io: Io,
tid: Zcu.PerThread.Id,
ty: Index,
tag: Tag,
limbs: []const Limb,
) !void {
const local = ip.getLocal(tid);
- const items_list = local.getMutableItems(gpa);
- const limbs_list = local.getMutableLimbs(gpa);
+ const items_list = local.getMutableItems(gpa, io);
+ const limbs_list = local.getMutableLimbs(gpa, io);
const limbs_len: u32 = @intCast(limbs.len);
try limbs_list.ensureUnusedCapacity(Int.limbs_items_len + limbs_len);
items_list.appendAssumeCapacity(.{
@@ -10510,28 +10603,29 @@ fn extraData(extra: Local.Extra, comptime T: type, index: u32) T {
test "basic usage" {
const gpa = std.testing.allocator;
+ const io = std.testing.io;
var ip: InternPool = .empty;
- try ip.init(gpa, 1);
- defer ip.deinit(gpa);
+ try ip.init(gpa, io, 1);
+ defer ip.deinit(gpa, io);
- const i32_type = try ip.get(gpa, .main, .{ .int_type = .{
+ const i32_type = try ip.get(gpa, io, .main, .{ .int_type = .{
.signedness = .signed,
.bits = 32,
} });
- const array_i32 = try ip.get(gpa, .main, .{ .array_type = .{
+ const array_i32 = try ip.get(gpa, io, .main, .{ .array_type = .{
.len = 10,
.child = i32_type,
.sentinel = .none,
} });
- const another_i32_type = try ip.get(gpa, .main, .{ .int_type = .{
+ const another_i32_type = try ip.get(gpa, io, .main, .{ .int_type = .{
.signedness = .signed,
.bits = 32,
} });
try std.testing.expect(another_i32_type == i32_type);
- const another_array_i32 = try ip.get(gpa, .main, .{ .array_type = .{
+ const another_array_i32 = try ip.get(gpa, io, .main, .{ .array_type = .{
.len = 10,
.child = i32_type,
.sentinel = .none,
@@ -10608,6 +10702,7 @@ pub fn sliceLen(ip: *const InternPool, index: Index) Index {
pub fn getCoerced(
ip: *InternPool,
gpa: Allocator,
+ io: Io,
tid: Zcu.PerThread.Id,
val: Index,
new_ty: Index,
@@ -10616,22 +10711,22 @@ pub fn getCoerced(
if (old_ty == new_ty) return val;
switch (val) {
- .undef => return ip.get(gpa, tid, .{ .undef = new_ty }),
+ .undef => return ip.get(gpa, io, tid, .{ .undef = new_ty }),
.null_value => {
- if (ip.isOptionalType(new_ty)) return ip.get(gpa, tid, .{ .opt = .{
+ if (ip.isOptionalType(new_ty)) return ip.get(gpa, io, tid, .{ .opt = .{
.ty = new_ty,
.val = .none,
} });
if (ip.isPointerType(new_ty)) switch (ip.indexToKey(new_ty).ptr_type.flags.size) {
- .one, .many, .c => return ip.get(gpa, tid, .{ .ptr = .{
+ .one, .many, .c => return ip.get(gpa, io, tid, .{ .ptr = .{
.ty = new_ty,
.base_addr = .int,
.byte_offset = 0,
} }),
- .slice => return ip.get(gpa, tid, .{ .slice = .{
+ .slice => return ip.get(gpa, io, tid, .{ .slice = .{
.ty = new_ty,
- .ptr = try ip.get(gpa, tid, .{ .ptr = .{
+ .ptr = try ip.get(gpa, io, tid, .{ .ptr = .{
.ty = ip.slicePtrType(new_ty),
.base_addr = .int,
.byte_offset = 0,
@@ -10644,15 +10739,15 @@ pub fn getCoerced(
const unwrapped_val = val.unwrap(ip);
const val_item = unwrapped_val.getItem(ip);
switch (val_item.tag) {
- .func_decl => return getCoercedFuncDecl(ip, gpa, tid, val, new_ty),
- .func_instance => return getCoercedFuncInstance(ip, gpa, tid, val, new_ty),
+ .func_decl => return getCoercedFuncDecl(ip, gpa, io, tid, val, new_ty),
+ .func_instance => return getCoercedFuncInstance(ip, gpa, io, tid, val, new_ty),
.func_coerced => {
const func: Index = @enumFromInt(unwrapped_val.getExtra(ip).view().items(.@"0")[
val_item.data + std.meta.fieldIndex(Tag.FuncCoerced, "func").?
]);
switch (func.unwrap(ip).getTag(ip)) {
- .func_decl => return getCoercedFuncDecl(ip, gpa, tid, val, new_ty),
- .func_instance => return getCoercedFuncInstance(ip, gpa, tid, val, new_ty),
+ .func_decl => return getCoercedFuncDecl(ip, gpa, io, tid, val, new_ty),
+ .func_instance => return getCoercedFuncInstance(ip, gpa, io, tid, val, new_ty),
else => unreachable,
}
},
@@ -10662,16 +10757,16 @@ pub fn getCoerced(
}
switch (ip.indexToKey(val)) {
- .undef => return ip.get(gpa, tid, .{ .undef = new_ty }),
+ .undef => return ip.get(gpa, io, tid, .{ .undef = new_ty }),
.func => unreachable,
.int => |int| switch (ip.indexToKey(new_ty)) {
- .enum_type => return ip.get(gpa, tid, .{ .enum_tag = .{
+ .enum_type => return ip.get(gpa, io, tid, .{ .enum_tag = .{
.ty = new_ty,
- .int = try ip.getCoerced(gpa, tid, val, ip.loadEnumType(new_ty).tag_ty),
+ .int = try ip.getCoerced(gpa, io, tid, val, ip.loadEnumType(new_ty).tag_ty),
} }),
.ptr_type => switch (int.storage) {
- inline .u64, .i64 => |int_val| return ip.get(gpa, tid, .{ .ptr = .{
+ inline .u64, .i64 => |int_val| return ip.get(gpa, io, tid, .{ .ptr = .{
.ty = new_ty,
.base_addr = .int,
.byte_offset = @intCast(int_val),
@@ -10680,7 +10775,7 @@ pub fn getCoerced(
.lazy_align, .lazy_size => {},
},
else => if (ip.isIntegerType(new_ty))
- return ip.getCoercedInts(gpa, tid, int, new_ty),
+ return ip.getCoercedInts(gpa, io, tid, int, new_ty),
},
.float => |float| switch (ip.indexToKey(new_ty)) {
.simple_type => |simple| switch (simple) {
@@ -10691,7 +10786,7 @@ pub fn getCoerced(
.f128,
.c_longdouble,
.comptime_float,
- => return ip.get(gpa, tid, .{ .float = .{
+ => return ip.get(gpa, io, tid, .{ .float = .{
.ty = new_ty,
.storage = float.storage,
} }),
@@ -10700,17 +10795,17 @@ pub fn getCoerced(
else => {},
},
.enum_tag => |enum_tag| if (ip.isIntegerType(new_ty))
- return ip.getCoercedInts(gpa, tid, ip.indexToKey(enum_tag.int).int, new_ty),
+ return ip.getCoercedInts(gpa, io, tid, ip.indexToKey(enum_tag.int).int, new_ty),
.enum_literal => |enum_literal| switch (ip.indexToKey(new_ty)) {
.enum_type => {
const enum_type = ip.loadEnumType(new_ty);
const index = enum_type.nameIndex(ip, enum_literal).?;
- return ip.get(gpa, tid, .{ .enum_tag = .{
+ return ip.get(gpa, io, tid, .{ .enum_tag = .{
.ty = new_ty,
.int = if (enum_type.values.len != 0)
enum_type.values.get(ip)[index]
else
- try ip.get(gpa, tid, .{ .int = .{
+ try ip.get(gpa, io, tid, .{ .int = .{
.ty = enum_type.tag_ty,
.storage = .{ .u64 = index },
} }),
@@ -10719,22 +10814,22 @@ pub fn getCoerced(
else => {},
},
.slice => |slice| if (ip.isPointerType(new_ty) and ip.indexToKey(new_ty).ptr_type.flags.size == .slice)
- return ip.get(gpa, tid, .{ .slice = .{
+ return ip.get(gpa, io, tid, .{ .slice = .{
.ty = new_ty,
- .ptr = try ip.getCoerced(gpa, tid, slice.ptr, ip.slicePtrType(new_ty)),
+ .ptr = try ip.getCoerced(gpa, io, tid, slice.ptr, ip.slicePtrType(new_ty)),
.len = slice.len,
} })
else if (ip.isIntegerType(new_ty))
- return ip.getCoerced(gpa, tid, slice.ptr, new_ty),
+ return ip.getCoerced(gpa, io, tid, slice.ptr, new_ty),
.ptr => |ptr| if (ip.isPointerType(new_ty) and ip.indexToKey(new_ty).ptr_type.flags.size != .slice)
- return ip.get(gpa, tid, .{ .ptr = .{
+ return ip.get(gpa, io, tid, .{ .ptr = .{
.ty = new_ty,
.base_addr = ptr.base_addr,
.byte_offset = ptr.byte_offset,
} })
else if (ip.isIntegerType(new_ty))
switch (ptr.base_addr) {
- .int => return ip.get(gpa, tid, .{ .int = .{
+ .int => return ip.get(gpa, io, tid, .{ .int = .{
.ty = .usize_type,
.storage = .{ .u64 = @intCast(ptr.byte_offset) },
} }),
@@ -10743,14 +10838,14 @@ pub fn getCoerced(
.opt => |opt| switch (ip.indexToKey(new_ty)) {
.ptr_type => |ptr_type| return switch (opt.val) {
.none => switch (ptr_type.flags.size) {
- .one, .many, .c => try ip.get(gpa, tid, .{ .ptr = .{
+ .one, .many, .c => try ip.get(gpa, io, tid, .{ .ptr = .{
.ty = new_ty,
.base_addr = .int,
.byte_offset = 0,
} }),
- .slice => try ip.get(gpa, tid, .{ .slice = .{
+ .slice => try ip.get(gpa, io, tid, .{ .slice = .{
.ty = new_ty,
- .ptr = try ip.get(gpa, tid, .{ .ptr = .{
+ .ptr = try ip.get(gpa, io, tid, .{ .ptr = .{
.ty = ip.slicePtrType(new_ty),
.base_addr = .int,
.byte_offset = 0,
@@ -10758,29 +10853,29 @@ pub fn getCoerced(
.len = .undef_usize,
} }),
},
- else => |payload| try ip.getCoerced(gpa, tid, payload, new_ty),
+ else => |payload| try ip.getCoerced(gpa, io, tid, payload, new_ty),
},
- .opt_type => |child_type| return try ip.get(gpa, tid, .{ .opt = .{
+ .opt_type => |child_type| return try ip.get(gpa, io, tid, .{ .opt = .{
.ty = new_ty,
.val = switch (opt.val) {
.none => .none,
- else => try ip.getCoerced(gpa, tid, opt.val, child_type),
+ else => try ip.getCoerced(gpa, io, tid, opt.val, child_type),
},
} }),
else => {},
},
.err => |err| if (ip.isErrorSetType(new_ty))
- return ip.get(gpa, tid, .{ .err = .{
+ return ip.get(gpa, io, tid, .{ .err = .{
.ty = new_ty,
.name = err.name,
} })
else if (ip.isErrorUnionType(new_ty))
- return ip.get(gpa, tid, .{ .error_union = .{
+ return ip.get(gpa, io, tid, .{ .error_union = .{
.ty = new_ty,
.val = .{ .err_name = err.name },
} }),
.error_union => |error_union| if (ip.isErrorUnionType(new_ty))
- return ip.get(gpa, tid, .{ .error_union = .{
+ return ip.get(gpa, io, tid, .{ .error_union = .{
.ty = new_ty,
.val = error_union.val,
} }),
@@ -10799,20 +10894,20 @@ pub fn getCoerced(
};
if (old_ty_child != new_ty_child) break :direct;
switch (aggregate.storage) {
- .bytes => |bytes| return ip.get(gpa, tid, .{ .aggregate = .{
+ .bytes => |bytes| return ip.get(gpa, io, tid, .{ .aggregate = .{
.ty = new_ty,
.storage = .{ .bytes = bytes },
} }),
.elems => |elems| {
const elems_copy = try gpa.dupe(Index, elems[0..new_len]);
defer gpa.free(elems_copy);
- return ip.get(gpa, tid, .{ .aggregate = .{
+ return ip.get(gpa, io, tid, .{ .aggregate = .{
.ty = new_ty,
.storage = .{ .elems = elems_copy },
} });
},
.repeated_elem => |elem| {
- return ip.get(gpa, tid, .{ .aggregate = .{
+ return ip.get(gpa, io, tid, .{ .aggregate = .{
.ty = new_ty,
.storage = .{ .repeated_elem = elem },
} });
@@ -10830,7 +10925,7 @@ pub fn getCoerced(
// We have to intern each value here, so unfortunately we can't easily avoid
// the repeated indexToKey calls.
for (agg_elems, 0..) |*elem, index| {
- elem.* = try ip.get(gpa, tid, .{ .int = .{
+ elem.* = try ip.get(gpa, io, tid, .{ .int = .{
.ty = .u8_type,
.storage = .{ .u64 = bytes.at(index, ip) },
} });
@@ -10847,27 +10942,27 @@ pub fn getCoerced(
.struct_type => ip.loadStructType(new_ty).field_types.get(ip)[i],
else => unreachable,
};
- elem.* = try ip.getCoerced(gpa, tid, elem.*, new_elem_ty);
+ elem.* = try ip.getCoerced(gpa, io, tid, elem.*, new_elem_ty);
}
- return ip.get(gpa, tid, .{ .aggregate = .{ .ty = new_ty, .storage = .{ .elems = agg_elems } } });
+ return ip.get(gpa, io, tid, .{ .aggregate = .{ .ty = new_ty, .storage = .{ .elems = agg_elems } } });
},
else => {},
}
switch (ip.indexToKey(new_ty)) {
.opt_type => |child_type| switch (val) {
- .null_value => return ip.get(gpa, tid, .{ .opt = .{
+ .null_value => return ip.get(gpa, io, tid, .{ .opt = .{
.ty = new_ty,
.val = .none,
} }),
- else => return ip.get(gpa, tid, .{ .opt = .{
+ else => return ip.get(gpa, io, tid, .{ .opt = .{
.ty = new_ty,
- .val = try ip.getCoerced(gpa, tid, val, child_type),
+ .val = try ip.getCoerced(gpa, io, tid, val, child_type),
} }),
},
- .error_union_type => |error_union_type| return ip.get(gpa, tid, .{ .error_union = .{
+ .error_union_type => |error_union_type| return ip.get(gpa, io, tid, .{ .error_union = .{
.ty = new_ty,
- .val = .{ .payload = try ip.getCoerced(gpa, tid, val, error_union_type.payload_type) },
+ .val = .{ .payload = try ip.getCoerced(gpa, io, tid, val, error_union_type.payload_type) },
} }),
else => {},
}
@@ -10884,6 +10979,7 @@ pub fn getCoerced(
fn getCoercedFuncDecl(
ip: *InternPool,
gpa: Allocator,
+ io: Io,
tid: Zcu.PerThread.Id,
val: Index,
new_ty: Index,
@@ -10893,12 +10989,13 @@ fn getCoercedFuncDecl(
unwrapped_val.getData(ip) + std.meta.fieldIndex(Tag.FuncDecl, "ty").?
]);
if (new_ty == prev_ty) return val;
- return getCoercedFunc(ip, gpa, tid, val, new_ty);
+ return getCoercedFunc(ip, gpa, io, tid, val, new_ty);
}
fn getCoercedFuncInstance(
ip: *InternPool,
gpa: Allocator,
+ io: Io,
tid: Zcu.PerThread.Id,
val: Index,
new_ty: Index,
@@ -10908,20 +11005,21 @@ fn getCoercedFuncInstance(
unwrapped_val.getData(ip) + std.meta.fieldIndex(Tag.FuncInstance, "ty").?
]);
if (new_ty == prev_ty) return val;
- return getCoercedFunc(ip, gpa, tid, val, new_ty);
+ return getCoercedFunc(ip, gpa, io, tid, val, new_ty);
}
fn getCoercedFunc(
ip: *InternPool,
gpa: Allocator,
+ io: Io,
tid: Zcu.PerThread.Id,
func: Index,
ty: Index,
) Allocator.Error!Index {
const local = ip.getLocal(tid);
- const items = local.getMutableItems(gpa);
+ const items = local.getMutableItems(gpa, io);
try items.ensureUnusedCapacity(1);
- const extra = local.getMutableExtra(gpa);
+ const extra = local.getMutableExtra(gpa, io);
const prev_extra_len = extra.mutate.len;
try extra.ensureUnusedCapacity(@typeInfo(Tag.FuncCoerced).@"struct".fields.len);
@@ -10932,7 +11030,7 @@ fn getCoercedFunc(
});
errdefer extra.mutate.len = prev_extra_len;
- var gop = try ip.getOrPutKey(gpa, tid, .{
+ var gop = try ip.getOrPutKey(gpa, io, tid, .{
.func = ip.extraFuncCoerced(extra.list.*, extra_index),
});
defer gop.deinit();
@@ -10950,8 +11048,15 @@ fn getCoercedFunc(
/// Asserts `val` has an integer type.
/// Assumes `new_ty` is an integer type.
-pub fn getCoercedInts(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, int: Key.Int, new_ty: Index) Allocator.Error!Index {
- return ip.get(gpa, tid, .{ .int = .{
+pub fn getCoercedInts(
+ ip: *InternPool,
+ gpa: Allocator,
+ io: Io,
+ tid: Zcu.PerThread.Id,
+ int: Key.Int,
+ new_ty: Index,
+) Allocator.Error!Index {
+ return ip.get(gpa, io, tid, .{ .int = .{
.ty = new_ty,
.storage = int.storage,
} });
@@ -11047,12 +11152,12 @@ pub fn errorUnionPayload(ip: *const InternPool, ty: Index) Index {
}
/// This is only legal because the initializer is not part of the hash.
-pub fn mutateVarInit(ip: *InternPool, index: Index, init_index: Index) void {
+pub fn mutateVarInit(ip: *InternPool, io: Io, index: Index, init_index: Index) void {
const unwrapped_index = index.unwrap(ip);
const local = ip.getLocal(unwrapped_index.tid);
- local.mutate.extra.mutex.lock();
- defer local.mutate.extra.mutex.unlock();
+ local.mutate.extra.mutex.lockUncancelable(io);
+ defer local.mutate.extra.mutex.unlock(io);
const extra_items = local.shared.extra.view().items(.@"0");
const item = unwrapped_index.getItem(ip);
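From here on the diff repeats one mechanical substitution: each per-`Local` extra mutex is now a `std.Io.Mutex`, so acquisition and release go through the `Io` instance, and sites that must not observe cancellation use `lockUncancelable`. The idiom, as a sketch (`State` is a hypothetical stand-in for the guarded data; the real code uses the file-local `Io` alias for `std.Io`):

    var mutex: std.Io.Mutex = .init;

    fn mutateShared(io: std.Io, state: *State) void {
        mutex.lockUncancelable(io); // acquisition cannot be interrupted by cancellation
        defer mutex.unlock(io);
        state.value += 1; // the guarded read-modify-write
    }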
@@ -11508,11 +11613,12 @@ pub fn namespacePtr(ip: *InternPool, namespace_index: NamespaceIndex) *Zcu.Names
pub fn createComptimeUnit(
ip: *InternPool,
gpa: Allocator,
+ io: Io,
tid: Zcu.PerThread.Id,
zir_index: TrackedInst.Index,
namespace: NamespaceIndex,
) Allocator.Error!ComptimeUnit.Id {
- const comptime_units = ip.getLocal(tid).getMutableComptimeUnits(gpa);
+ const comptime_units = ip.getLocal(tid).getMutableComptimeUnits(gpa, io);
const id_unwrapped: ComptimeUnit.Id.Unwrapped = .{
.tid = tid,
.index = comptime_units.mutate.len,
@@ -11532,9 +11638,10 @@ pub fn getComptimeUnit(ip: *const InternPool, id: ComptimeUnit.Id) ComptimeUnit
/// Create a `Nav` which does not undergo semantic analysis.
/// Since it is never analyzed, the `Nav`'s value must be known at creation time.
-pub fn createNav(
+fn createNav(
ip: *InternPool,
gpa: Allocator,
+ io: Io,
tid: Zcu.PerThread.Id,
opts: struct {
name: NullTerminatedString,
@@ -11546,7 +11653,7 @@ pub fn createNav(
@"addrspace": std.builtin.AddressSpace,
},
) Allocator.Error!Nav.Index {
- const navs = ip.getLocal(tid).getMutableNavs(gpa);
+ const navs = ip.getLocal(tid).getMutableNavs(gpa, io);
const index_unwrapped: Nav.Index.Unwrapped = .{
.tid = tid,
.index = navs.mutate.len,
@@ -11571,13 +11678,14 @@ pub fn createNav(
pub fn createDeclNav(
ip: *InternPool,
gpa: Allocator,
+ io: Io,
tid: Zcu.PerThread.Id,
name: NullTerminatedString,
fqn: NullTerminatedString,
zir_index: TrackedInst.Index,
namespace: NamespaceIndex,
) Allocator.Error!Nav.Index {
- const navs = ip.getLocal(tid).getMutableNavs(gpa);
+ const navs = ip.getLocal(tid).getMutableNavs(gpa, io);
try navs.ensureUnusedCapacity(1);
@@ -11603,6 +11711,7 @@ pub fn createDeclNav(
/// If its status is already `resolved`, the old value is discarded.
pub fn resolveNavType(
ip: *InternPool,
+ io: Io,
nav: Nav.Index,
resolved: struct {
type: InternPool.Index,
@@ -11617,8 +11726,8 @@ pub fn resolveNavType(
const unwrapped = nav.unwrap(ip);
const local = ip.getLocal(unwrapped.tid);
- local.mutate.extra.mutex.lock();
- defer local.mutate.extra.mutex.unlock();
+ local.mutate.extra.mutex.lockUncancelable(io);
+ defer local.mutate.extra.mutex.unlock(io);
const navs = local.shared.navs.view();
@@ -11647,6 +11756,7 @@ pub fn resolveNavType(
/// If its status is already `resolved`, the old value is discarded.
pub fn resolveNavValue(
ip: *InternPool,
+ io: Io,
nav: Nav.Index,
resolved: struct {
val: InternPool.Index,
@@ -11659,8 +11769,8 @@ pub fn resolveNavValue(
const unwrapped = nav.unwrap(ip);
const local = ip.getLocal(unwrapped.tid);
- local.mutate.extra.mutex.lock();
- defer local.mutate.extra.mutex.unlock();
+ local.mutate.extra.mutex.lockUncancelable(io);
+ defer local.mutate.extra.mutex.unlock(io);
const navs = local.shared.navs.view();
@@ -11687,6 +11797,7 @@ pub fn resolveNavValue(
pub fn createNamespace(
ip: *InternPool,
gpa: Allocator,
+ io: Io,
tid: Zcu.PerThread.Id,
initialization: Zcu.Namespace,
) Allocator.Error!NamespaceIndex {
@@ -11700,7 +11811,7 @@ pub fn createNamespace(
reused_namespace.* = initialization;
return reused_namespace_index;
}
- const namespaces = local.getMutableNamespaces(gpa);
+ const namespaces = local.getMutableNamespaces(gpa, io);
const last_bucket_len = local.mutate.namespaces.last_bucket_len & Local.namespaces_bucket_mask;
if (last_bucket_len == 0) {
try namespaces.ensureUnusedCapacity(1);
@@ -11748,10 +11859,11 @@ pub fn filePtr(ip: *const InternPool, file_index: FileIndex) *Zcu.File {
pub fn createFile(
ip: *InternPool,
gpa: Allocator,
+ io: Io,
tid: Zcu.PerThread.Id,
file: File,
) Allocator.Error!FileIndex {
- const files = ip.getLocal(tid).getMutableFiles(gpa);
+ const files = ip.getLocal(tid).getMutableFiles(gpa, io);
const file_index_unwrapped: FileIndex.Unwrapped = .{
.tid = tid,
.index = files.mutate.len,
@@ -11782,20 +11894,22 @@ const EmbeddedNulls = enum {
pub fn getOrPutString(
ip: *InternPool,
gpa: Allocator,
+ io: Io,
tid: Zcu.PerThread.Id,
slice: []const u8,
comptime embedded_nulls: EmbeddedNulls,
) Allocator.Error!embedded_nulls.StringType() {
- const string_bytes = ip.getLocal(tid).getMutableStringBytes(gpa);
+ const string_bytes = ip.getLocal(tid).getMutableStringBytes(gpa, io);
try string_bytes.ensureUnusedCapacity(slice.len + 1);
string_bytes.appendSliceAssumeCapacity(.{slice});
string_bytes.appendAssumeCapacity(.{0});
- return ip.getOrPutTrailingString(gpa, tid, @intCast(slice.len + 1), embedded_nulls);
+ return ip.getOrPutTrailingString(gpa, io, tid, @intCast(slice.len + 1), embedded_nulls);
}
pub fn getOrPutStringFmt(
ip: *InternPool,
gpa: Allocator,
+ io: Io,
tid: Zcu.PerThread.Id,
comptime format: []const u8,
args: anytype,
@@ -11804,20 +11918,21 @@ pub fn getOrPutStringFmt(
// ensure that references to strings in args do not get invalidated
const format_z = format ++ .{0};
const len: u32 = @intCast(std.fmt.count(format_z, args));
- const string_bytes = ip.getLocal(tid).getMutableStringBytes(gpa);
+ const string_bytes = ip.getLocal(tid).getMutableStringBytes(gpa, io);
const slice = try string_bytes.addManyAsSlice(len);
assert((std.fmt.bufPrint(slice[0], format_z, args) catch unreachable).len == len);
- return ip.getOrPutTrailingString(gpa, tid, len, embedded_nulls);
+ return ip.getOrPutTrailingString(gpa, io, tid, len, embedded_nulls);
}
pub fn getOrPutStringOpt(
ip: *InternPool,
gpa: Allocator,
+ io: Io,
tid: Zcu.PerThread.Id,
slice: ?[]const u8,
comptime embedded_nulls: EmbeddedNulls,
) Allocator.Error!embedded_nulls.OptionalStringType() {
- const string = try getOrPutString(ip, gpa, tid, slice orelse return .none, embedded_nulls);
+ const string = try getOrPutString(ip, gpa, io, tid, slice orelse return .none, embedded_nulls);
return string.toOptional();
}
@@ -11825,14 +11940,15 @@ pub fn getOrPutStringOpt(
pub fn getOrPutTrailingString(
ip: *InternPool,
gpa: Allocator,
+ io: Io,
tid: Zcu.PerThread.Id,
len: u32,
comptime embedded_nulls: EmbeddedNulls,
) Allocator.Error!embedded_nulls.StringType() {
const local = ip.getLocal(tid);
- const strings = local.getMutableStrings(gpa);
+ const strings = local.getMutableStrings(gpa, io);
try strings.ensureUnusedCapacity(1);
- const string_bytes = local.getMutableStringBytes(gpa);
+ const string_bytes = local.getMutableStringBytes(gpa, io);
const start: u32 = @intCast(string_bytes.mutate.len - len);
if (len > 0 and string_bytes.view().items(.@"0")[string_bytes.mutate.len - 1] == 0) {
string_bytes.mutate.len -= 1;
@@ -11870,8 +11986,8 @@ pub fn getOrPutTrailingString(
string_bytes.shrinkRetainingCapacity(start);
return @enumFromInt(@intFromEnum(index));
}
- shard.mutate.string_map.mutex.lock();
- defer shard.mutate.string_map.mutex.unlock();
+ shard.mutate.string_map.mutex.lock(io, tid);
+ defer shard.mutate.string_map.mutex.unlock(io);
if (map.entries != shard.shared.string_map.entries) {
map = shard.shared.string_map;
map_mask = map.header().mask();
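The string-map hunk above is one instance of the double-checked pattern used for the shard maps: probe lock-free first, then take the mutex and re-validate the view, because another thread may have grown (and therefore reallocated) the map in between. Roughly, with hypothetical helper names (only the lock/recheck shape is taken from this diff):

    fn getOrPutSketch(io: std.Io, tid: Zcu.PerThread.Id, shard: *Shard, key: Key) !Index {
        if (lookupLockFree(shard, key)) |existing| return existing;
        shard.mutate.string_map.mutex.lock(io, tid);
        defer shard.mutate.string_map.mutex.unlock(io);
        // Re-read the map view: it may have been reallocated while unlocked.
        if (mapWasReplaced(shard)) reloadView(shard);
        if (lookupLocked(shard, key)) |existing| return existing;
        return insertLocked(shard, key);
    }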
@@ -12590,11 +12706,11 @@ pub fn funcAnalysisUnordered(ip: *const InternPool, func: Index) FuncAnalysis {
return @atomicLoad(FuncAnalysis, ip.funcAnalysisPtr(func), .unordered);
}
-pub fn funcSetHasErrorTrace(ip: *InternPool, func: Index, has_error_trace: bool) void {
+pub fn funcSetHasErrorTrace(ip: *InternPool, io: Io, func: Index, has_error_trace: bool) void {
const unwrapped_func = func.unwrap(ip);
const extra_mutex = &ip.getLocal(unwrapped_func.tid).mutate.extra.mutex;
- extra_mutex.lock();
- defer extra_mutex.unlock();
+ extra_mutex.lockUncancelable(io);
+ defer extra_mutex.unlock(io);
const analysis_ptr = ip.funcAnalysisPtr(func);
var analysis = analysis_ptr.*;
@@ -12602,11 +12718,11 @@ pub fn funcSetHasErrorTrace(ip: *InternPool, func: Index, has_error_trace: bool)
@atomicStore(FuncAnalysis, analysis_ptr, analysis, .release);
}
-pub fn funcSetDisableInstrumentation(ip: *InternPool, func: Index) void {
+pub fn funcSetDisableInstrumentation(ip: *InternPool, io: Io, func: Index) void {
const unwrapped_func = func.unwrap(ip);
const extra_mutex = &ip.getLocal(unwrapped_func.tid).mutate.extra.mutex;
- extra_mutex.lock();
- defer extra_mutex.unlock();
+ extra_mutex.lockUncancelable(io);
+ defer extra_mutex.unlock(io);
const analysis_ptr = ip.funcAnalysisPtr(func);
var analysis = analysis_ptr.*;
@@ -12614,11 +12730,11 @@ pub fn funcSetDisableInstrumentation(ip: *InternPool, func: Index) void {
@atomicStore(FuncAnalysis, analysis_ptr, analysis, .release);
}
-pub fn funcSetDisableIntrinsics(ip: *InternPool, func: Index) void {
+pub fn funcSetDisableIntrinsics(ip: *InternPool, io: Io, func: Index) void {
const unwrapped_func = func.unwrap(ip);
const extra_mutex = &ip.getLocal(unwrapped_func.tid).mutate.extra.mutex;
- extra_mutex.lock();
- defer extra_mutex.unlock();
+ extra_mutex.lockUncancelable(io);
+ defer extra_mutex.unlock(io);
const analysis_ptr = ip.funcAnalysisPtr(func);
var analysis = analysis_ptr.*;
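`funcSetHasErrorTrace`, `funcSetDisableInstrumentation`, and `funcSetDisableIntrinsics` all follow the same discipline: writers serialize on the extra mutex, then publish the updated `FuncAnalysis` with a release store, so lock-free readers (the `.unordered` load in `funcAnalysisUnordered`) never observe a torn value. As a sketch (`FlagWord` stands in for the packed `FuncAnalysis` struct):

    fn setFlagSketch(io: std.Io, extra_mutex: *std.Io.Mutex, flags: *FlagWord) void {
        extra_mutex.lockUncancelable(io); // serialize concurrent writers
        defer extra_mutex.unlock(io);
        var analysis = flags.*;
        analysis.some_flag = true;
        // Release store pairs with the readers' lock-free loads.
        @atomicStore(FlagWord, flags, analysis, .release);
    }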
@@ -12666,15 +12782,6 @@ pub fn iesFuncIndex(ip: *const InternPool, ies_index: Index) Index {
/// Returns a mutable pointer to the resolved error set type of an inferred
/// error set function. The returned pointer is invalidated when anything is
/// added to `ip`.
-fn iesResolvedPtr(ip: *InternPool, ies_index: Index) *Index {
- const ies_item = ies_index.getItem(ip);
- assert(ies_item.tag == .type_inferred_error_set);
- return ip.funcIesResolvedPtr(ies_item.data);
-}
-
-/// Returns a mutable pointer to the resolved error set type of an inferred
-/// error set function. The returned pointer is invalidated when anything is
-/// added to `ip`.
fn funcIesResolvedPtr(ip: *const InternPool, func_index: Index) *Index {
assert(ip.funcAnalysisUnordered(func_index).inferred_error_set);
const unwrapped_func = func_index.unwrap(ip);
@@ -12706,11 +12813,11 @@ pub fn funcIesResolvedUnordered(ip: *const InternPool, index: Index) Index {
return @atomicLoad(Index, ip.funcIesResolvedPtr(index), .unordered);
}
-pub fn funcSetIesResolved(ip: *InternPool, index: Index, ies: Index) void {
+pub fn funcSetIesResolved(ip: *InternPool, io: Io, index: Index, ies: Index) void {
const unwrapped_func = index.unwrap(ip);
const extra_mutex = &ip.getLocal(unwrapped_func.tid).mutate.extra.mutex;
- extra_mutex.lock();
- defer extra_mutex.unlock();
+ extra_mutex.lockUncancelable(io);
+ defer extra_mutex.unlock(io);
@atomicStore(Index, ip.funcIesResolvedPtr(index), ies, .release);
}
@@ -12777,19 +12884,19 @@ const GlobalErrorSet = struct {
} align(std.atomic.cache_line),
mutate: struct {
names: Local.ListMutate,
- map: struct { mutex: std.Thread.Mutex },
+ map: struct { mutex: Io.Mutex },
} align(std.atomic.cache_line),
const Names = Local.List(struct { NullTerminatedString });
const empty: GlobalErrorSet = .{
.shared = .{
- .names = Names.empty,
- .map = Shard.Map(GlobalErrorSet.Index).empty,
+ .names = .empty,
+ .map = .empty,
},
.mutate = .{
- .names = Local.ListMutate.empty,
- .map = .{ .mutex = .{} },
+ .names = .empty,
+ .map = .{ .mutex = .init },
},
};
@@ -12807,6 +12914,7 @@ const GlobalErrorSet = struct {
fn getErrorValue(
ges: *GlobalErrorSet,
gpa: Allocator,
+ io: Io,
arena_state: *std.heap.ArenaAllocator.State,
name: NullTerminatedString,
) Allocator.Error!GlobalErrorSet.Index {
@@ -12825,8 +12933,8 @@ const GlobalErrorSet = struct {
if (entry.hash != hash) continue;
if (names.view().items(.@"0")[@intFromEnum(index) - 1] == name) return index;
}
- ges.mutate.map.mutex.lock();
- defer ges.mutate.map.mutex.unlock();
+ ges.mutate.map.mutex.lockUncancelable(io);
+ defer ges.mutate.map.mutex.unlock(io);
if (map.entries != ges.shared.map.entries) {
map = ges.shared.map;
map_mask = map.header().mask();
@@ -12842,6 +12950,7 @@ const GlobalErrorSet = struct {
}
const mutable_names: Names.Mutable = .{
.gpa = gpa,
+ .io = io,
.arena = arena_state,
.mutate = &ges.mutate.names,
.list = &ges.shared.names,
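As this hunk shows, a `Mutable` view now bundles the `Io` instance alongside the allocator, which is why every `getMutable*` helper in this diff gained an `io` parameter. Inferred from the initializer above (only these five fields are visible here; the rest of the type is not shown):

    const Mutable = struct {
        gpa: Allocator,
        io: Io,
        arena: *std.heap.ArenaAllocator.State,
        mutate: *Local.ListMutate, // shared length/lock state
        list: *Names, // backing storage
    };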
@@ -12923,10 +13032,11 @@ const GlobalErrorSet = struct {
pub fn getErrorValue(
ip: *InternPool,
gpa: Allocator,
+ io: Io,
tid: Zcu.PerThread.Id,
name: NullTerminatedString,
) Allocator.Error!Zcu.ErrorInt {
- return @intFromEnum(try ip.global_error_set.getErrorValue(gpa, &ip.getLocal(tid).mutate.arena, name));
+ return @intFromEnum(try ip.global_error_set.getErrorValue(gpa, io, &ip.getLocal(tid).mutate.arena, name));
}
pub fn getErrorValueIfExists(ip: *const InternPool, name: NullTerminatedString) ?Zcu.ErrorInt {
diff --git a/src/Sema.zig b/src/Sema.zig
index baeb9a62de..fec6850c4c 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -853,8 +853,9 @@ pub const Block = struct {
fn trackZir(block: *Block, inst: Zir.Inst.Index) Allocator.Error!InternPool.TrackedInst.Index {
const pt = block.sema.pt;
+ const comp = pt.zcu.comp;
block.sema.code.assertTrackable(inst);
- return pt.zcu.intern_pool.trackZir(pt.zcu.gpa, pt.tid, .{
+ return pt.zcu.intern_pool.trackZir(comp.gpa, comp.io, pt.tid, .{
.file = block.getFileScopeIndex(pt.zcu),
.inst = inst,
});
@@ -1061,7 +1062,7 @@ fn analyzeInlineBody(
/// The index which a break instruction can target to break from this body.
break_target: Zir.Inst.Index,
) CompileError!?Air.Inst.Ref {
- if (sema.analyzeBodyInner(block, body)) |_| {
+ if (sema.analyzeBodyInner(block, body)) {
return null;
} else |err| switch (err) {
error.ComptimeBreak => {},
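The `|_|` captures disappear because `analyzeBodyInner` has a `void` payload, and Zig allows an `if` over an error union to omit the capture in that case, keeping only the `|err|` branch. A self-contained example:

    fn mayFail() error{ComptimeBreak}!void {
        return error.ComptimeBreak;
    }

    test "void-payload error union if" {
        if (mayFail()) {
            // success path: nothing to capture
        } else |err| switch (err) {
            error.ComptimeBreak => {},
        }
    }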
@@ -1808,7 +1809,7 @@ fn analyzeBodyInner(
child_block.instructions = block.instructions;
defer block.instructions = child_block.instructions;
- const break_result: ?BreakResult = if (sema.analyzeBodyInner(&child_block, inline_body)) |_| r: {
+ const break_result: ?BreakResult = if (sema.analyzeBodyInner(&child_block, inline_body)) r: {
break :r null;
} else |err| switch (err) {
error.ComptimeBreak => brk_res: {
@@ -1956,7 +1957,7 @@ fn analyzeBodyInner(
.@"defer" => blk: {
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].@"defer";
const defer_body = sema.code.bodySlice(inst_data.index, inst_data.len);
- if (sema.analyzeBodyInner(block, defer_body)) |_| {
+ if (sema.analyzeBodyInner(block, defer_body)) {
// The defer terminated noreturn - no more analysis needed.
break;
} else |err| switch (err) {
@@ -1975,7 +1976,7 @@ fn analyzeBodyInner(
const err_code = try sema.resolveInst(inst_data.err_code);
try map.ensureSpaceForInstructions(sema.gpa, defer_body);
map.putAssumeCapacity(extra.remapped_err_code, err_code);
- if (sema.analyzeBodyInner(block, defer_body)) |_| {
+ if (sema.analyzeBodyInner(block, defer_body)) {
// The defer terminated noreturn - no more analysis needed.
break;
} else |err| switch (err) {
@@ -2205,10 +2206,11 @@ fn analyzeAsType(
pub fn setupErrorReturnTrace(sema: *Sema, block: *Block, last_arg_index: usize) !void {
const pt = sema.pt;
- const zcu = pt.zcu;
- const comp = zcu.comp;
- const gpa = sema.gpa;
- const ip = &zcu.intern_pool;
+ const comp = pt.zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
+ const ip = &pt.zcu.intern_pool;
+
if (!comp.config.any_error_tracing) return;
assert(!block.isComptime());
@@ -2231,12 +2233,12 @@ pub fn setupErrorReturnTrace(sema: *Sema, block: *Block, last_arg_index: usize)
const st_ptr = try err_trace_block.addTy(.alloc, try pt.singleMutPtrType(stack_trace_ty));
// st.instruction_addresses = &addrs;
- const instruction_addresses_field_name = try ip.getOrPutString(gpa, pt.tid, "instruction_addresses", .no_embedded_nulls);
+ const instruction_addresses_field_name = try ip.getOrPutString(gpa, io, pt.tid, "instruction_addresses", .no_embedded_nulls);
const addr_field_ptr = try sema.fieldPtr(&err_trace_block, src, st_ptr, instruction_addresses_field_name, src, true);
try sema.storePtr2(&err_trace_block, src, addr_field_ptr, src, addrs_ptr, src, .store);
// st.index = 0;
- const index_field_name = try ip.getOrPutString(gpa, pt.tid, "index", .no_embedded_nulls);
+ const index_field_name = try ip.getOrPutString(gpa, io, pt.tid, "index", .no_embedded_nulls);
const index_field_ptr = try sema.fieldPtr(&err_trace_block, src, st_ptr, index_field_name, src, true);
try sema.storePtr2(&err_trace_block, src, index_field_ptr, src, .zero_usize, src, .store);
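Nearly every Sema hunk that follows repeats the same prologue in place of the old `sema.gpa` / `zcu.gpa` accesses: both the allocator and the `Io` instance are derived from the `Compilation`. The pattern as it appears throughout:

    const pt = sema.pt;
    const zcu = pt.zcu;
    const comp = zcu.comp;
    const gpa = comp.gpa;
    const io = comp.io;
    const ip = &zcu.intern_pool;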
@@ -2828,9 +2830,12 @@ fn zirTupleDecl(
block: *Block,
extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
- const gpa = sema.gpa;
const pt = sema.pt;
const zcu = pt.zcu;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
+
const fields_len = extended.small;
const extra = sema.code.extraData(Zir.Inst.TupleDecl, extended.operand);
var extra_index = extra.end;
@@ -2863,7 +2868,7 @@ fn zirTupleDecl(
const coerced_field_init = try sema.coerce(block, field_type, uncoerced_field_init, init_src);
const field_init_val = try sema.resolveConstDefinedValue(block, init_src, coerced_field_init, .{ .simple = .tuple_field_default_value });
if (field_init_val.canMutateComptimeVarState(zcu)) {
- const field_name = try zcu.intern_pool.getOrPutStringFmt(gpa, pt.tid, "{d}", .{field_index}, .no_embedded_nulls);
+ const field_name = try zcu.intern_pool.getOrPutStringFmt(gpa, io, pt.tid, "{d}", .{field_index}, .no_embedded_nulls);
return sema.failWithContainsReferenceToComptimeVar(block, init_src, field_name, "field default value", field_init_val);
}
break :init field_init_val.toIntern();
@@ -2872,7 +2877,7 @@ fn zirTupleDecl(
};
}
- return Air.internedToRef(try zcu.intern_pool.getTupleType(gpa, pt.tid, .{
+ return Air.internedToRef(try zcu.intern_pool.getTupleType(gpa, io, pt.tid, .{
.types = types,
.values = inits,
}));
@@ -2911,7 +2916,11 @@ fn validateTupleFieldType(
fn getCaptures(sema: *Sema, block: *Block, type_src: LazySrcLoc, extra_index: usize, captures_len: u32) ![]InternPool.CaptureValue {
const pt = sema.pt;
const zcu = pt.zcu;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const ip = &zcu.intern_pool;
+
const parent_ty: Type = .fromInterned(zcu.namespacePtr(block.namespace).owner_type);
const parent_captures: InternPool.CaptureValue.Slice = parent_ty.getCaptures(zcu);
@@ -2934,7 +2943,7 @@ fn getCaptures(sema: *Sema, block: *Block, type_src: LazySrcLoc, extra_index: us
};
const loaded_val = try sema.resolveLazyValue(unresolved_loaded_val);
if (loaded_val.canMutateComptimeVarState(zcu)) {
- const field_name = try ip.getOrPutString(zcu.gpa, pt.tid, zir_name_slice, .no_embedded_nulls);
+ const field_name = try ip.getOrPutString(gpa, io, pt.tid, zir_name_slice, .no_embedded_nulls);
return sema.failWithContainsReferenceToComptimeVar(block, type_src, field_name, "captured value", loaded_val);
}
break :capture .{ .@"comptime" = loaded_val.toIntern() };
@@ -2943,7 +2952,7 @@ fn getCaptures(sema: *Sema, block: *Block, type_src: LazySrcLoc, extra_index: us
const air_ref = try sema.resolveInst(inst.toRef());
if (try sema.resolveValueResolveLazy(air_ref)) |val| {
if (val.canMutateComptimeVarState(zcu)) {
- const field_name = try ip.getOrPutString(zcu.gpa, pt.tid, zir_name_slice, .no_embedded_nulls);
+ const field_name = try ip.getOrPutString(gpa, io, pt.tid, zir_name_slice, .no_embedded_nulls);
return sema.failWithContainsReferenceToComptimeVar(block, type_src, field_name, "captured value", val);
}
break :capture .{ .@"comptime" = val.toIntern() };
@@ -2952,7 +2961,8 @@ fn getCaptures(sema: *Sema, block: *Block, type_src: LazySrcLoc, extra_index: us
}),
.decl_val => |str| capture: {
const decl_name = try ip.getOrPutString(
- sema.gpa,
+ gpa,
+ io,
pt.tid,
sema.code.nullTerminatedString(str),
.no_embedded_nulls,
@@ -2962,7 +2972,8 @@ fn getCaptures(sema: *Sema, block: *Block, type_src: LazySrcLoc, extra_index: us
},
.decl_ref => |str| capture: {
const decl_name = try ip.getOrPutString(
- sema.gpa,
+ gpa,
+ io,
pt.tid,
sema.code.nullTerminatedString(str),
.no_embedded_nulls,
@@ -2984,8 +2995,11 @@ fn zirStructDecl(
) CompileError!Air.Inst.Ref {
const pt = sema.pt;
const zcu = pt.zcu;
- const gpa = sema.gpa;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const ip = &zcu.intern_pool;
+
const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small);
const extra = sema.code.extraData(Zir.Inst.StructDecl, extended.operand);
@@ -3040,7 +3054,7 @@ fn zirStructDecl(
.captures = captures,
} },
};
- const wip_ty = switch (try ip.getStructType(gpa, pt.tid, struct_init, false)) {
+ const wip_ty = switch (try ip.getStructType(gpa, io, pt.tid, struct_init, false)) {
.existing => |ty| {
const new_ty = try pt.ensureTypeUpToDate(ty);
@@ -3108,7 +3122,9 @@ pub fn createTypeName(
} {
const pt = sema.pt;
const zcu = pt.zcu;
- const gpa = zcu.gpa;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const ip = &zcu.intern_pool;
switch (name_strategy) {
@@ -3158,7 +3174,7 @@ pub fn createTypeName(
w.writeByte(')') catch return error.OutOfMemory;
return .{
- .name = try ip.getOrPutString(gpa, pt.tid, aw.written(), .no_embedded_nulls),
+ .name = try ip.getOrPutString(gpa, io, pt.tid, aw.written(), .no_embedded_nulls),
.nav = .none,
};
},
@@ -3170,7 +3186,7 @@ pub fn createTypeName(
for (@intFromEnum(inst.?)..zir_tags.len) |i| switch (zir_tags[i]) {
.dbg_var_ptr, .dbg_var_val => if (zir_data[i].str_op.operand == ref) {
return .{
- .name = try ip.getOrPutStringFmt(gpa, pt.tid, "{f}.{s}", .{
+ .name = try ip.getOrPutStringFmt(gpa, io, pt.tid, "{f}.{s}", .{
block.type_name_ctx.fmt(ip), zir_data[i].str_op.getStr(sema.code),
}, .no_embedded_nulls),
.nav = .none,
@@ -3193,7 +3209,7 @@ pub fn createTypeName(
// that builtin from the language, we can consider this.
return .{
- .name = try ip.getOrPutStringFmt(gpa, pt.tid, "{f}__{s}_{d}", .{
+ .name = try ip.getOrPutStringFmt(gpa, io, pt.tid, "{f}__{s}_{d}", .{
block.type_name_ctx.fmt(ip), anon_prefix, @intFromEnum(type_index),
}, .no_embedded_nulls),
.nav = .none,
@@ -3211,8 +3227,11 @@ fn zirEnumDecl(
const pt = sema.pt;
const zcu = pt.zcu;
- const gpa = sema.gpa;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const ip = &zcu.intern_pool;
+
const small: Zir.Inst.EnumDecl.Small = @bitCast(extended.small);
const extra = sema.code.extraData(Zir.Inst.EnumDecl, extended.operand);
var extra_index: usize = extra.end;
@@ -3281,7 +3300,7 @@ fn zirEnumDecl(
.captures = captures,
} },
};
- const wip_ty = switch (try ip.getEnumType(gpa, pt.tid, enum_init, false)) {
+ const wip_ty = switch (try ip.getEnumType(gpa, io, pt.tid, enum_init, false)) {
.existing => |ty| {
const new_ty = try pt.ensureTypeUpToDate(ty);
@@ -3380,8 +3399,11 @@ fn zirUnionDecl(
const pt = sema.pt;
const zcu = pt.zcu;
- const gpa = sema.gpa;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const ip = &zcu.intern_pool;
+
const small: Zir.Inst.UnionDecl.Small = @bitCast(extended.small);
const extra = sema.code.extraData(Zir.Inst.UnionDecl, extended.operand);
var extra_index: usize = extra.end;
@@ -3438,7 +3460,7 @@ fn zirUnionDecl(
.captures = captures,
} },
};
- const wip_ty = switch (try ip.getUnionType(gpa, pt.tid, union_init, false)) {
+ const wip_ty = switch (try ip.getUnionType(gpa, io, pt.tid, union_init, false)) {
.existing => |ty| {
const new_ty = try pt.ensureTypeUpToDate(ty);
@@ -3503,7 +3525,9 @@ fn zirOpaqueDecl(
const pt = sema.pt;
const zcu = pt.zcu;
- const gpa = sema.gpa;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const ip = &zcu.intern_pool;
const small: Zir.Inst.OpaqueDecl.Small = @bitCast(extended.small);
@@ -3532,7 +3556,7 @@ fn zirOpaqueDecl(
.zir_index = tracked_inst,
.captures = captures,
};
- const wip_ty = switch (try ip.getOpaqueType(gpa, pt.tid, opaque_init)) {
+ const wip_ty = switch (try ip.getOpaqueType(gpa, io, pt.tid, opaque_init)) {
.existing => |ty| {
// Make sure we update the namespace if the declaration is re-analyzed, to pick
// up on e.g. changed comptime decls.
@@ -3587,7 +3611,10 @@ fn zirErrorSetDecl(
const pt = sema.pt;
const zcu = pt.zcu;
- const gpa = sema.gpa;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
+
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const extra = sema.code.extraData(Zir.Inst.ErrorSetDecl, inst_data.payload_index);
@@ -3599,7 +3626,7 @@ fn zirErrorSetDecl(
while (extra_index < extra_index_end) : (extra_index += 1) {
const name_index: Zir.NullTerminatedString = @enumFromInt(sema.code.extra[extra_index]);
const name = sema.code.nullTerminatedString(name_index);
- const name_ip = try zcu.intern_pool.getOrPutString(gpa, pt.tid, name, .no_embedded_nulls);
+ const name_ip = try zcu.intern_pool.getOrPutString(gpa, io, pt.tid, name, .no_embedded_nulls);
_ = try pt.getErrorValue(name_ip);
const result = names.getOrPutAssumeCapacity(name_ip);
assert(!result.found_existing); // verified in AstGen
@@ -3761,11 +3788,14 @@ fn indexablePtrLen(
) CompileError!Air.Inst.Ref {
const pt = sema.pt;
const zcu = pt.zcu;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const object_ty = sema.typeOf(object);
const is_pointer_to = object_ty.isSinglePointer(zcu);
const indexable_ty = if (is_pointer_to) object_ty.childType(zcu) else object_ty;
try sema.checkIndexable(block, src, indexable_ty);
- const field_name = try zcu.intern_pool.getOrPutString(sema.gpa, pt.tid, "len", .no_embedded_nulls);
+ const field_name = try zcu.intern_pool.getOrPutString(gpa, io, pt.tid, "len", .no_embedded_nulls);
return sema.fieldVal(block, src, object, field_name, src);
}
@@ -3777,13 +3807,16 @@ fn indexablePtrLenOrNone(
) CompileError!Air.Inst.Ref {
const pt = sema.pt;
const zcu = pt.zcu;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const operand_ty = sema.typeOf(operand);
try checkMemOperand(sema, block, src, operand_ty);
switch (operand_ty.ptrSize(zcu)) {
.many, .c => return .none,
.one, .slice => {},
}
- const field_name = try zcu.intern_pool.getOrPutString(sema.gpa, pt.tid, "len", .no_embedded_nulls);
+ const field_name = try zcu.intern_pool.getOrPutString(gpa, io, pt.tid, "len", .no_embedded_nulls);
return sema.fieldVal(block, src, operand, field_name, src);
}
@@ -3961,6 +3994,9 @@ fn zirMakePtrConst(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
fn resolveComptimeKnownAllocPtr(sema: *Sema, block: *Block, alloc: Air.Inst.Ref, resolved_alloc_ty: ?Type) CompileError!?InternPool.Index {
const pt = sema.pt;
const zcu = pt.zcu;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const alloc_ty = resolved_alloc_ty orelse sema.typeOf(alloc);
const ptr_info = alloc_ty.ptrInfo(zcu);
@@ -4108,7 +4144,7 @@ fn resolveComptimeKnownAllocPtr(sema: *Sema, block: *Block, alloc: Air.Inst.Ref,
};
const new_ptr_ty = tmp_air.typeOfIndex(air_ptr, &zcu.intern_pool).toIntern();
const new_ptr = switch (method) {
- .same_addr => try zcu.intern_pool.getCoerced(sema.gpa, pt.tid, decl_parent_ptr, new_ptr_ty),
+ .same_addr => try zcu.intern_pool.getCoerced(gpa, io, pt.tid, decl_parent_ptr, new_ptr_ty),
.opt_payload => ptr: {
// Set the optional to non-null at comptime.
// If the payload is OPV, we must use that value instead of undef.
@@ -4523,8 +4559,11 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const pt = sema.pt;
const zcu = pt.zcu;
- const gpa = sema.gpa;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const ip = &zcu.intern_pool;
+
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const extra = sema.code.extraData(Zir.Inst.MultiOp, inst_data.payload_index);
const all_args = sema.code.refSlice(extra.end, extra.data.operands_len);
@@ -4570,7 +4609,7 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
return sema.failWithOwnedErrorMsg(block, msg);
}
if (!object_ty.indexableHasLen(zcu)) continue;
- break :l try sema.fieldVal(block, arg_src, object, try ip.getOrPutString(gpa, pt.tid, "len", .no_embedded_nulls), arg_src);
+ break :l try sema.fieldVal(block, arg_src, object, try ip.getOrPutString(gpa, io, pt.tid, "len", .no_embedded_nulls), arg_src);
} else l: {
// This argument is a range.
const range_start = try sema.resolveInst(zir_arg_pair[0]);
@@ -4733,6 +4772,10 @@ fn zirCoercePtrElemTy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE
fn zirTryOperandTy(sema: *Sema, block: *Block, inst: Zir.Inst.Index, is_ref: bool) CompileError!Air.Inst.Ref {
const pt = sema.pt;
const zcu = pt.zcu;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
+
const un_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
const src = block.nodeOffset(un_node.src_node);
@@ -4758,7 +4801,7 @@ fn zirTryOperandTy(sema: *Sema, block: *Block, inst: Zir.Inst.Index, is_ref: boo
// This function cannot return an error.
// `try` is still valid if the error case is impossible, i.e. no error is returned.
// So, the result type has an error set of `error{}`.
- break :err_set .fromInterned(try zcu.intern_pool.getErrorSetType(zcu.gpa, pt.tid, &.{}));
+ break :err_set .fromInterned(try zcu.intern_pool.getErrorSetType(gpa, io, pt.tid, &.{}));
},
}
}
@@ -5003,7 +5046,9 @@ fn validateStructInit(
) CompileError!void {
const pt = sema.pt;
const zcu = pt.zcu;
- const gpa = sema.gpa;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const ip = &zcu.intern_pool;
// Tracks whether each field was explicitly initialized.
@@ -5017,6 +5062,7 @@ fn validateStructInit(
const field_ptr_extra = sema.code.extraData(Zir.Inst.Field, field_ptr_data.payload_index).data;
const field_name = try ip.getOrPutString(
gpa,
+ io,
pt.tid,
sema.code.nullTerminatedString(field_ptr_extra.field_name_start),
.no_embedded_nulls,
@@ -5461,9 +5507,15 @@ fn zirStoreNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!v
}
fn zirStr(sema: *Sema, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+ const pt = sema.pt;
+ const zcu = pt.zcu;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
+ const ip = &zcu.intern_pool;
const bytes = sema.code.instructions.items(.data)[@intFromEnum(inst)].str.get(sema.code);
return sema.addStrLit(
- try sema.pt.zcu.intern_pool.getOrPutString(sema.gpa, sema.pt.tid, bytes, .maybe_embedded_nulls),
+ try ip.getOrPutString(gpa, io, pt.tid, bytes, .maybe_embedded_nulls),
bytes.len,
);
}
@@ -5555,7 +5607,9 @@ fn zirCompileLog(
) CompileError!Air.Inst.Ref {
const pt = sema.pt;
const zcu = pt.zcu;
- const gpa = zcu.gpa;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
var aw: std.Io.Writer.Allocating = .init(gpa);
defer aw.deinit();
@@ -5579,7 +5633,7 @@ fn zirCompileLog(
}
}
- const line_data = try zcu.intern_pool.getOrPutString(gpa, pt.tid, aw.written(), .no_embedded_nulls);
+ const line_data = try zcu.intern_pool.getOrPutString(gpa, io, pt.tid, aw.written(), .no_embedded_nulls);
const line_idx: Zcu.CompileLogLine.Index = if (zcu.free_compile_log_lines.pop()) |idx| idx: {
zcu.compile_log_lines.items[@intFromEnum(idx)] = .{
@@ -5757,7 +5811,9 @@ fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileEr
const pt = sema.pt;
const zcu = pt.zcu;
const comp = zcu.comp;
- const gpa = sema.gpa;
+ const gpa = comp.gpa;
+ const io = comp.io;
+
const pl_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const src = parent_block.nodeOffset(pl_node.src_node);
const extra = sema.code.extraData(Zir.Inst.Block, pl_node.payload_index);
@@ -5846,7 +5902,7 @@ fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileEr
errdefer c_import_file_path.deinit(gpa);
const c_import_file = try gpa.create(Zcu.File);
errdefer gpa.destroy(c_import_file);
- const c_import_file_index = try zcu.intern_pool.createFile(gpa, pt.tid, .{
+ const c_import_file_index = try zcu.intern_pool.createFile(gpa, io, pt.tid, .{
.bin_digest = c_import_file_path.digest(),
.file = c_import_file,
.root_type = .none,
@@ -5959,7 +6015,7 @@ fn resolveBlockBody(
assert(sema.air_instructions.items(.tag)[@intFromEnum(merges.block_inst)] == .block);
var need_debug_scope = false;
child_block.need_debug_scope = &need_debug_scope;
- if (sema.analyzeBodyInner(child_block, body)) |_| {
+ if (sema.analyzeBodyInner(child_block, body)) {
return sema.resolveAnalyzedBlock(parent_block, src, child_block, merges, need_debug_scope);
} else |err| switch (err) {
error.ComptimeBreak => {
@@ -6350,6 +6406,7 @@ pub fn analyzeExport(
fn zirDisableInstrumentation(sema: *Sema) CompileError!void {
const pt = sema.pt;
const zcu = pt.zcu;
+ const io = zcu.comp.io;
const ip = &zcu.intern_pool;
const func = switch (sema.owner.unwrap()) {
.func => |func| func,
@@ -6360,13 +6417,14 @@ fn zirDisableInstrumentation(sema: *Sema) CompileError!void {
.memoized_state,
=> return, // does nothing outside a function
};
- ip.funcSetDisableInstrumentation(func);
+ ip.funcSetDisableInstrumentation(io, func);
sema.allow_memoize = false;
}
fn zirDisableIntrinsics(sema: *Sema) CompileError!void {
const pt = sema.pt;
const zcu = pt.zcu;
+ const io = zcu.comp.io;
const ip = &zcu.intern_pool;
const func = switch (sema.owner.unwrap()) {
.func => |func| func,
@@ -6377,7 +6435,7 @@ fn zirDisableIntrinsics(sema: *Sema) CompileError!void {
.memoized_state,
=> return, // does nothing outside a function
};
- ip.funcSetDisableIntrinsics(func);
+ ip.funcSetDisableIntrinsics(io, func);
sema.allow_memoize = false;
}
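// Editorial sketch: both disable-* builtins follow the same shape. The flag
// setters now take the Io handle, presumably because function flags in the
// InternPool are synchronized through Io-aware primitives, and each setter
// also defeats memoization of the surrounding analysis:
ip.funcSetDisableInstrumentation(io, func);
sema.allow_memoize = false;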
@@ -6576,10 +6634,15 @@ pub fn appendAirString(sema: *Sema, str: []const u8) Allocator.Error!Air.NullTer
fn zirDeclRef(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const pt = sema.pt;
const zcu = pt.zcu;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
+
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].str_tok;
const src = block.tokenOffset(inst_data.src_tok);
const decl_name = try zcu.intern_pool.getOrPutString(
- sema.gpa,
+ gpa,
+ io,
pt.tid,
inst_data.get(sema.code),
.no_embedded_nulls,
@@ -6591,10 +6654,15 @@ fn zirDeclRef(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
fn zirDeclVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const pt = sema.pt;
const zcu = pt.zcu;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
+
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].str_tok;
const src = block.tokenOffset(inst_data.src_tok);
const decl_name = try zcu.intern_pool.getOrPutString(
- sema.gpa,
+ gpa,
+ io,
pt.tid,
inst_data.get(sema.code),
.no_embedded_nulls,
@@ -6683,7 +6751,9 @@ fn funcDeclSrcInst(sema: *Sema, func_inst: Air.Inst.Ref) !?InternPool.TrackedIns
pub fn analyzeSaveErrRetIndex(sema: *Sema, block: *Block) SemaError!Air.Inst.Ref {
const pt = sema.pt;
const zcu = pt.zcu;
- const gpa = sema.gpa;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
if (block.isComptime() or block.is_typeof) {
const index_val = try pt.intValue_u64(.usize, sema.comptime_err_ret_trace.items.len);
@@ -6694,7 +6764,7 @@ pub fn analyzeSaveErrRetIndex(sema: *Sema, block: *Block) SemaError!Air.Inst.Ref
const stack_trace_ty = try sema.getBuiltinType(block.nodeOffset(.zero), .StackTrace);
try stack_trace_ty.resolveFields(pt);
- const field_name = try zcu.intern_pool.getOrPutString(gpa, pt.tid, "index", .no_embedded_nulls);
+ const field_name = try zcu.intern_pool.getOrPutString(gpa, io, pt.tid, "index", .no_embedded_nulls);
const field_index = sema.structFieldIndex(block, stack_trace_ty, field_name, LazySrcLoc.unneeded) catch |err| switch (err) {
error.AnalysisFail => @panic("std.builtin.StackTrace is corrupt"),
error.ComptimeReturn, error.ComptimeBreak => unreachable,
@@ -6721,7 +6791,9 @@ fn popErrorReturnTrace(
) CompileError!void {
const pt = sema.pt;
const zcu = pt.zcu;
- const gpa = sema.gpa;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
var is_non_error: ?bool = null;
var is_non_error_inst: Air.Inst.Ref = undefined;
if (operand != .none) {
@@ -6738,7 +6810,7 @@ fn popErrorReturnTrace(
try stack_trace_ty.resolveFields(pt);
const ptr_stack_trace_ty = try pt.singleMutPtrType(stack_trace_ty);
const err_return_trace = try block.addTy(.err_return_trace, ptr_stack_trace_ty);
- const field_name = try zcu.intern_pool.getOrPutString(gpa, pt.tid, "index", .no_embedded_nulls);
+ const field_name = try zcu.intern_pool.getOrPutString(gpa, io, pt.tid, "index", .no_embedded_nulls);
const field_ptr = try sema.structFieldPtr(block, src, err_return_trace, field_name, src, stack_trace_ty, true);
try sema.storePtr2(block, src, field_ptr, src, saved_error_trace_index, src, .store);
} else if (is_non_error == null) {
@@ -6764,7 +6836,7 @@ fn popErrorReturnTrace(
try stack_trace_ty.resolveFields(pt);
const ptr_stack_trace_ty = try pt.singleMutPtrType(stack_trace_ty);
const err_return_trace = try then_block.addTy(.err_return_trace, ptr_stack_trace_ty);
- const field_name = try zcu.intern_pool.getOrPutString(gpa, pt.tid, "index", .no_embedded_nulls);
+ const field_name = try zcu.intern_pool.getOrPutString(gpa, io, pt.tid, "index", .no_embedded_nulls);
const field_ptr = try sema.structFieldPtr(&then_block, src, err_return_trace, field_name, src, stack_trace_ty, true);
try sema.storePtr2(&then_block, src, field_ptr, src, saved_error_trace_index, src, .store);
_ = try then_block.addBr(cond_block_inst, .void_value);
@@ -6818,6 +6890,10 @@ fn zirCall(
const pt = sema.pt;
const zcu = pt.zcu;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
+
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const callee_src = block.src(.{ .node_offset_call_func = inst_data.src_node });
const call_src = block.nodeOffset(inst_data.src_node);
@@ -6837,7 +6913,8 @@ fn zirCall(
.field => blk: {
const object_ptr = try sema.resolveInst(extra.data.obj_ptr);
const field_name = try zcu.intern_pool.getOrPutString(
- sema.gpa,
+ gpa,
+ io,
pt.tid,
sema.code.nullTerminatedString(extra.data.field_name_start),
.no_embedded_nulls,
@@ -6897,7 +6974,7 @@ fn zirCall(
if (input_is_error or (pop_error_return_trace and return_ty.isError(zcu))) {
const stack_trace_ty = try sema.getBuiltinType(call_src, .StackTrace);
try stack_trace_ty.resolveFields(pt);
- const field_name = try zcu.intern_pool.getOrPutString(sema.gpa, pt.tid, "index", .no_embedded_nulls);
+ const field_name = try zcu.intern_pool.getOrPutString(gpa, io, pt.tid, "index", .no_embedded_nulls);
const field_index = try sema.structFieldIndex(block, stack_trace_ty, field_name, call_src);
// Insert a save instruction before the arg resolution + call instructions we just generated
@@ -7232,7 +7309,9 @@ fn analyzeCall(
) CompileError!Air.Inst.Ref {
const pt = sema.pt;
const zcu = pt.zcu;
- const gpa = zcu.gpa;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const ip = &zcu.intern_pool;
const arena = sema.arena;
@@ -7544,7 +7623,7 @@ fn analyzeCall(
if (func_ty_info.cc == .auto) {
switch (sema.owner.unwrap()) {
.@"comptime", .nav_ty, .nav_val, .type, .memoized_state => {},
- .func => |owner_func| ip.funcSetHasErrorTrace(owner_func, true),
+ .func => |owner_func| ip.funcSetHasErrorTrace(io, owner_func, true),
}
}
for (args, 0..) |arg, arg_idx| {
@@ -7596,7 +7675,7 @@ fn analyzeCall(
} else resolved_ret_ty;
// We now need to actually create the function instance.
- const func_instance = try ip.getFuncInstance(gpa, pt.tid, .{
+ const func_instance = try ip.getFuncInstance(gpa, io, pt.tid, .{
.param_types = runtime_param_tys.items,
.noalias_bits = noalias_bits,
.bare_return_type = bare_ret_ty.toIntern(),
@@ -7614,7 +7693,7 @@ fn analyzeCall(
// This call is problematic as it breaks guarantees about order-independence of semantic analysis.
// These guarantees are necessary for incremental compilation and parallel semantic analysis.
// See: #22410
- zcu.funcInfo(func_instance).maxBranchQuota(ip, sema.branch_quota);
+ zcu.funcInfo(func_instance).maxBranchQuota(ip, io, sema.branch_quota);
break :func .{ Air.internedToRef(func_instance), runtime_args.items };
};
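// Editorial sketch of the instantiation flow above, where `instance_info`
// stands in for the argument struct built from the runtime parameter types:
// the instance is interned through the Io-aware entry point, then its branch
// quota is widened to the caller's (the acknowledged #22410 hazard).
const func_instance = try ip.getFuncInstance(gpa, io, pt.tid, instance_info);
zcu.funcInfo(func_instance).maxBranchQuota(ip, io, sema.branch_quota);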
@@ -8102,6 +8181,9 @@ fn zirArrayTypeSentinel(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compil
const pt = sema.pt;
const zcu = pt.zcu;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const ip = &zcu.intern_pool;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
@@ -8116,7 +8198,7 @@ fn zirArrayTypeSentinel(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compil
const sentinel = try sema.coerce(block, elem_type, uncasted_sentinel, sentinel_src);
const sentinel_val = try sema.resolveConstDefinedValue(block, sentinel_src, sentinel, .{ .simple = .array_sentinel });
if (sentinel_val.canMutateComptimeVarState(zcu)) {
- const sentinel_name = try ip.getOrPutString(sema.gpa, pt.tid, "sentinel", .no_embedded_nulls);
+ const sentinel_name = try ip.getOrPutString(gpa, io, pt.tid, "sentinel", .no_embedded_nulls);
return sema.failWithContainsReferenceToComptimeVar(block, sentinel_src, sentinel_name, "sentinel", sentinel_val);
}
const array_ty = try pt.arrayType(.{
@@ -8194,10 +8276,17 @@ fn validateErrorUnionPayloadType(sema: *Sema, block: *Block, payload_ty: Type, p
fn zirErrorValue(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
_ = block;
+
const pt = sema.pt;
+ const zcu = pt.zcu;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
+
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].str_tok;
const name = try pt.zcu.intern_pool.getOrPutString(
- sema.gpa,
+ gpa,
+ io,
pt.tid,
inst_data.get(sema.code),
.no_embedded_nulls,
@@ -8259,7 +8348,9 @@ fn zirErrorFromInt(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD
const pt = sema.pt;
const zcu = pt.zcu;
+ const io = zcu.comp.io;
const ip = &zcu.intern_pool;
+
const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
const src = block.nodeOffset(extra.node);
const operand_src = block.builtinCallArgSrc(extra.node, 0);
@@ -8271,8 +8362,8 @@ fn zirErrorFromInt(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD
const int = try sema.usizeCast(block, operand_src, try value.toUnsignedIntSema(pt));
if (int > len: {
const mutate = &ip.global_error_set.mutate;
- mutate.map.mutex.lock();
- defer mutate.map.mutex.unlock();
+ mutate.map.mutex.lockUncancelable(io);
+ defer mutate.map.mutex.unlock(io);
break :len mutate.names.len;
} or int == 0)
return sema.fail(block, operand_src, "integer value '{d}' represents no error", .{int});
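// Editorial sketch: `std.Io.Mutex` operations take the Io handle explicitly.
// The uncancelable lock variant is used above because this length check has
// no way to surface cancellation; the discipline, with `mutate` as in the
// labeled block above, is:
mutate.map.mutex.lockUncancelable(io);
defer mutate.map.mutex.unlock(io);
const names_len = mutate.names.len;
_ = names_len;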
@@ -8361,10 +8452,14 @@ fn zirEnumLiteral(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
const pt = sema.pt;
const zcu = pt.zcu;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
+
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].str_tok;
const name = inst_data.get(sema.code);
return Air.internedToRef((try pt.intern(.{
- .enum_literal = try zcu.intern_pool.getOrPutString(sema.gpa, pt.tid, name, .no_embedded_nulls),
+ .enum_literal = try zcu.intern_pool.getOrPutString(gpa, io, pt.tid, name, .no_embedded_nulls),
})));
}
@@ -8374,11 +8469,16 @@ fn zirDeclLiteral(sema: *Sema, block: *Block, inst: Zir.Inst.Index, do_coerce: b
const pt = sema.pt;
const zcu = pt.zcu;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
+
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const src = block.nodeOffset(inst_data.src_node);
const extra = sema.code.extraData(Zir.Inst.Field, inst_data.payload_index).data;
const name = try zcu.intern_pool.getOrPutString(
- sema.gpa,
+ gpa,
+ io,
pt.tid,
sema.code.nullTerminatedString(extra.field_name_start),
.no_embedded_nulls,
@@ -8915,7 +9015,11 @@ fn zirFunc(
) CompileError!Air.Inst.Ref {
const pt = sema.pt;
const zcu = pt.zcu;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const ip = &zcu.intern_pool;
+
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const extra = sema.code.extraData(Zir.Inst.Func, inst_data.payload_index);
const target = zcu.getTarget();
@@ -8970,7 +9074,7 @@ fn zirFunc(
block,
LazySrcLoc.unneeded,
cc_type.getNamespaceIndex(zcu),
- try ip.getOrPutString(sema.gpa, pt.tid, "c", .no_embedded_nulls),
+ try ip.getOrPutString(gpa, io, pt.tid, "c", .no_embedded_nulls),
);
// The above should have errored.
@panic("std.builtin is corrupt");
@@ -9443,8 +9547,11 @@ fn funcCommon(
) CompileError!Air.Inst.Ref {
const pt = sema.pt;
const zcu = pt.zcu;
- const gpa = sema.gpa;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const ip = &zcu.intern_pool;
+
const ret_ty_src = block.src(.{ .node_offset_fn_type_ret_ty = src_node_offset });
const cc_src = block.src(.{ .node_offset_fn_type_cc = src_node_offset });
const func_src = block.nodeOffset(src_node_offset);
@@ -9563,7 +9670,7 @@ fn funcCommon(
if (inferred_error_set) {
assert(has_body);
- return .fromIntern(try ip.getFuncDeclIes(gpa, pt.tid, .{
+ return .fromIntern(try ip.getFuncDeclIes(gpa, io, pt.tid, .{
.owner_nav = sema.owner.unwrap().nav_val,
.param_types = param_types,
@@ -9583,7 +9690,7 @@ fn funcCommon(
}));
}
- const func_ty = try ip.getFuncType(gpa, pt.tid, .{
+ const func_ty = try ip.getFuncType(gpa, io, pt.tid, .{
.param_types = param_types,
.noalias_bits = noalias_bits,
.comptime_bits = comptime_bits,
@@ -9595,7 +9702,7 @@ fn funcCommon(
});
if (has_body) {
- return .fromIntern(try ip.getFuncDecl(gpa, pt.tid, .{
+ return .fromIntern(try ip.getFuncDecl(gpa, io, pt.tid, .{
.owner_nav = sema.owner.unwrap().nav_val,
.ty = func_ty,
.cc = cc,
@@ -9778,12 +9885,17 @@ fn zirFieldPtrLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
const pt = sema.pt;
const zcu = pt.zcu;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
+
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const src = block.nodeOffset(inst_data.src_node);
const field_name_src = block.src(.{ .node_offset_field_name = inst_data.src_node });
const extra = sema.code.extraData(Zir.Inst.Field, inst_data.payload_index).data;
const field_name = try zcu.intern_pool.getOrPutString(
- sema.gpa,
+ gpa,
+ io,
pt.tid,
sema.code.nullTerminatedString(extra.field_name_start),
.no_embedded_nulls,
@@ -9798,12 +9910,17 @@ fn zirFieldPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const pt = sema.pt;
const zcu = pt.zcu;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
+
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const src = block.nodeOffset(inst_data.src_node);
const field_name_src = block.src(.{ .node_offset_field_name = inst_data.src_node });
const extra = sema.code.extraData(Zir.Inst.Field, inst_data.payload_index).data;
const field_name = try zcu.intern_pool.getOrPutString(
- sema.gpa,
+ gpa,
+ io,
pt.tid,
sema.code.nullTerminatedString(extra.field_name_start),
.no_embedded_nulls,
@@ -9818,12 +9935,17 @@ fn zirStructInitFieldPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compi
const pt = sema.pt;
const zcu = pt.zcu;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
+
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const src = block.nodeOffset(inst_data.src_node);
const field_name_src = block.src(.{ .node_offset_field_name_init = inst_data.src_node });
const extra = sema.code.extraData(Zir.Inst.Field, inst_data.payload_index).data;
const field_name = try zcu.intern_pool.getOrPutString(
- sema.gpa,
+ gpa,
+ io,
pt.tid,
sema.code.nullTerminatedString(extra.field_name_start),
.no_embedded_nulls,
@@ -13941,9 +14063,14 @@ fn zirEmbedFile(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
fn zirRetErrValueCode(sema: *Sema, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const pt = sema.pt;
const zcu = pt.zcu;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
+
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].str_tok;
const name = try zcu.intern_pool.getOrPutString(
- sema.gpa,
+ gpa,
+ io,
pt.tid,
inst_data.get(sema.code),
.no_embedded_nulls,
@@ -14379,6 +14506,10 @@ fn analyzeTupleCat(
) CompileError!Air.Inst.Ref {
const pt = sema.pt;
const zcu = pt.zcu;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
+
const lhs_ty = sema.typeOf(lhs);
const rhs_ty = sema.typeOf(rhs);
const src = block.nodeOffset(src_node);
@@ -14434,7 +14565,7 @@ fn analyzeTupleCat(
break :rs runtime_src;
};
- const tuple_ty: Type = .fromInterned(try zcu.intern_pool.getTupleType(zcu.gpa, pt.tid, .{
+ const tuple_ty: Type = .fromInterned(try zcu.intern_pool.getTupleType(gpa, io, pt.tid, .{
.types = types,
.values = values,
}));
@@ -14821,6 +14952,10 @@ fn analyzeTupleMul(
) CompileError!Air.Inst.Ref {
const pt = sema.pt;
const zcu = pt.zcu;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
+
const operand_ty = sema.typeOf(operand);
const src = block.nodeOffset(src_node);
const len_src = block.src(.{ .node_offset_bin_rhs = src_node });
@@ -14856,7 +14991,7 @@ fn analyzeTupleMul(
break :rs runtime_src;
};
- const tuple_ty: Type = .fromInterned(try zcu.intern_pool.getTupleType(zcu.gpa, pt.tid, .{
+ const tuple_ty: Type = .fromInterned(try zcu.intern_pool.getTupleType(gpa, io, pt.tid, .{
.types = types,
.values = values,
}));
@@ -16388,7 +16523,11 @@ fn zirAsm(
const pt = sema.pt;
const zcu = pt.zcu;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const ip = &zcu.intern_pool;
+
const extra = sema.code.extraData(Zir.Inst.Asm, extended.operand);
const src = block.nodeOffset(extra.data.src_node);
const ret_ty_src = block.src(.{ .node_offset_asm_ret_ty = extra.data.src_node });
@@ -16445,7 +16584,7 @@ fn zirAsm(
} else {
const inst = try sema.resolveInst(output.data.operand);
if (!sema.checkRuntimeValue(inst)) {
- const output_name = try ip.getOrPutString(sema.gpa, pt.tid, name, .no_embedded_nulls);
+ const output_name = try ip.getOrPutString(gpa, io, pt.tid, name, .no_embedded_nulls);
return sema.failWithContainsReferenceToComptimeVar(block, output_src, output_name, "assembly output", .fromInterned(inst.toInterned().?));
}
arg.* = inst;
@@ -16476,7 +16615,7 @@ fn zirAsm(
const uncasted_arg = try sema.resolveInst(input.data.operand);
const name = sema.code.nullTerminatedString(input.data.name);
if (!sema.checkRuntimeValue(uncasted_arg)) {
- const input_name = try ip.getOrPutString(sema.gpa, pt.tid, name, .no_embedded_nulls);
+ const input_name = try ip.getOrPutString(gpa, io, pt.tid, name, .no_embedded_nulls);
return sema.failWithContainsReferenceToComptimeVar(block, input_src, input_name, "assembly input", .fromInterned(uncasted_arg.toInterned().?));
}
const uncasted_arg_ty = sema.typeOf(uncasted_arg);
@@ -16500,7 +16639,6 @@ fn zirAsm(
const clobbers_val = try sema.resolveConstDefinedValue(block, src, clobbers, .{ .simple = .clobber });
needed_capacity += asm_source.len / 4 + 1;
- const gpa = sema.gpa;
try sema.air_extra.ensureUnusedCapacity(gpa, needed_capacity);
const asm_air = try block.addInst(.{
.tag = .assembly,
@@ -17060,10 +17198,13 @@ fn zirBuiltinSrc(
const pt = sema.pt;
const zcu = pt.zcu;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const ip = &zcu.intern_pool;
+
const extra = sema.code.extraData(Zir.Inst.Src, extended.operand).data;
const fn_name = ip.getNav(zcu.funcInfo(sema.func_index).owner_nav).name;
- const gpa = sema.gpa;
const file_scope = block.getFileScope(zcu);
const func_name_val = v: {
@@ -17106,7 +17247,7 @@ fn zirBuiltinSrc(
.val = try pt.intern(.{ .aggregate = .{
.ty = array_ty,
.storage = .{
- .bytes = try ip.getOrPutString(gpa, pt.tid, module_name, .maybe_embedded_nulls),
+ .bytes = try ip.getOrPutString(gpa, io, pt.tid, module_name, .maybe_embedded_nulls),
},
} }),
} },
@@ -17132,7 +17273,7 @@ fn zirBuiltinSrc(
.val = try pt.intern(.{ .aggregate = .{
.ty = array_ty,
.storage = .{
- .bytes = try ip.getOrPutString(gpa, pt.tid, file_name, .maybe_embedded_nulls),
+ .bytes = try ip.getOrPutString(gpa, io, pt.tid, file_name, .maybe_embedded_nulls),
},
} }),
} },
@@ -17161,8 +17302,11 @@ fn zirBuiltinSrc(
fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const pt = sema.pt;
const zcu = pt.zcu;
- const gpa = sema.gpa;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const ip = &zcu.intern_pool;
+
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
const src = block.nodeOffset(inst_data.src_node);
const ty = try sema.resolveType(block, src, inst_data.operand);
@@ -17511,7 +17655,8 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const enum_type = ip.loadEnumType(ty.toIntern());
const value_val = if (enum_type.values.len > 0)
try ip.getCoercedInts(
- zcu.gpa,
+ gpa,
+ io,
pt.tid,
ip.indexToKey(enum_type.values.get(ip)[tag_index]).int,
.comptime_int_type,
@@ -17729,7 +17874,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const field_ty = tuple_type.types.get(ip)[field_index];
const field_val = tuple_type.values.get(ip)[field_index];
const name_val = v: {
- const field_name = try ip.getOrPutStringFmt(gpa, pt.tid, "{d}", .{field_index}, .no_embedded_nulls);
+ const field_name = try ip.getOrPutStringFmt(gpa, io, pt.tid, "{d}", .{field_index}, .no_embedded_nulls);
const field_name_len = field_name.length(ip);
const new_decl_ty = try pt.arrayType(.{
.len = field_name_len,
@@ -18752,10 +18897,15 @@ fn zirRetErrValue(
) CompileError!void {
const pt = sema.pt;
const zcu = pt.zcu;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
+
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].str_tok;
const src = block.tokenOffset(inst_data.src_tok);
const err_name = try zcu.intern_pool.getOrPutString(
- sema.gpa,
+ gpa,
+ io,
pt.tid,
inst_data.get(sema.code),
.no_embedded_nulls,
@@ -19121,6 +19271,9 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
const pt = sema.pt;
const zcu = pt.zcu;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const ip = &zcu.intern_pool;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].ptr_type;
@@ -19158,7 +19311,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
const val = try sema.resolveConstDefinedValue(block, sentinel_src, coerced, .{ .simple = .pointer_sentinel });
try checkSentinelType(sema, block, sentinel_src, elem_ty);
if (val.canMutateComptimeVarState(zcu)) {
- const sentinel_name = try ip.getOrPutString(sema.gpa, pt.tid, "sentinel", .no_embedded_nulls);
+ const sentinel_name = try ip.getOrPutString(gpa, io, pt.tid, "sentinel", .no_embedded_nulls);
return sema.failWithContainsReferenceToComptimeVar(block, sentinel_src, sentinel_name, "sentinel", val);
}
break :blk val.toIntern();
@@ -19463,15 +19616,18 @@ fn zirStructInit(
inst: Zir.Inst.Index,
is_ref: bool,
) CompileError!Air.Inst.Ref {
- const gpa = sema.gpa;
+ const pt = sema.pt;
+ const zcu = pt.zcu;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
+ const ip = &zcu.intern_pool;
+
const zir_datas = sema.code.instructions.items(.data);
const inst_data = zir_datas[@intFromEnum(inst)].pl_node;
const extra = sema.code.extraData(Zir.Inst.StructInit, inst_data.payload_index);
const src = block.nodeOffset(inst_data.src_node);
- const pt = sema.pt;
- const zcu = pt.zcu;
- const ip = &zcu.intern_pool;
const first_item = sema.code.extraData(Zir.Inst.StructInit.Item, extra.end).data;
const first_field_type_data = zir_datas[@intFromEnum(first_item.field_type)].pl_node;
const first_field_type_extra = sema.code.extraData(Zir.Inst.FieldType, first_field_type_data.payload_index).data;
@@ -19513,6 +19669,7 @@ fn zirStructInit(
const field_type_extra = sema.code.extraData(Zir.Inst.FieldType, field_type_data.payload_index).data;
const field_name = try ip.getOrPutString(
gpa,
+ io,
pt.tid,
sema.code.nullTerminatedString(field_type_extra.name_start),
.no_embedded_nulls,
@@ -19554,6 +19711,7 @@ fn zirStructInit(
const field_type_extra = sema.code.extraData(Zir.Inst.FieldType, field_type_data.payload_index).data;
const field_name = try ip.getOrPutString(
gpa,
+ io,
pt.tid,
sema.code.nullTerminatedString(field_type_extra.name_start),
.no_embedded_nulls,
@@ -19797,8 +19955,11 @@ fn structInitAnon(
) CompileError!Air.Inst.Ref {
const pt = sema.pt;
const zcu = pt.zcu;
- const gpa = sema.gpa;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const ip = &zcu.intern_pool;
+
const zir_datas = sema.code.instructions.items(.data);
const types = try sema.arena.alloc(InternPool.Index, extra_data.fields_len);
@@ -19828,7 +19989,7 @@ fn structInitAnon(
},
};
- field_name.* = try zcu.intern_pool.getOrPutString(gpa, pt.tid, name, .no_embedded_nulls);
+ field_name.* = try zcu.intern_pool.getOrPutString(gpa, io, pt.tid, name, .no_embedded_nulls);
const init = try sema.resolveInst(item.data.init);
field_ty.* = sema.typeOf(init).toIntern();
@@ -19871,7 +20032,7 @@ fn structInitAnon(
break :hash hasher.final();
};
const tracked_inst = try block.trackZir(inst);
- const struct_ty = switch (try ip.getStructType(gpa, pt.tid, .{
+ const struct_ty = switch (try ip.getStructType(gpa, io, pt.tid, .{
.layout = .auto,
.fields_len = extra_data.fields_len,
.known_non_opv = false,
@@ -20131,7 +20292,9 @@ fn arrayInitAnon(
) CompileError!Air.Inst.Ref {
const pt = sema.pt;
const zcu = pt.zcu;
- const gpa = sema.gpa;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const ip = &zcu.intern_pool;
const types = try sema.arena.alloc(InternPool.Index, operands.len);
@@ -20180,7 +20343,7 @@ fn arrayInitAnon(
break :blk new_values;
};
- const tuple_ty: Type = .fromInterned(try ip.getTupleType(gpa, pt.tid, .{
+ const tuple_ty: Type = .fromInterned(try ip.getTupleType(gpa, io, pt.tid, .{
.types = types,
.values = values_no_comptime,
}));
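// Editorial sketch: tuple types are interned from parallel `types` and
// `values` slices, with `.none` marking a field that has no comptime-known
// value. Assuming a two-field tuple of well-known type indices:
const demo_tuple = try ip.getTupleType(gpa, io, pt.tid, .{
    .types = &.{ .u8_type, .bool_type },
    .values = &.{ .none, .none },
});
_ = demo_tuple;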
@@ -20247,7 +20410,11 @@ fn zirFieldTypeRef(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
fn zirStructInitFieldType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const pt = sema.pt;
const zcu = pt.zcu;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const ip = &zcu.intern_pool;
+
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const extra = sema.code.extraData(Zir.Inst.FieldType, inst_data.payload_index).data;
const ty_src = block.nodeOffset(inst_data.src_node);
@@ -20255,7 +20422,7 @@ fn zirStructInitFieldType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Comp
const wrapped_aggregate_ty = try sema.resolveTypeOrPoison(block, ty_src, extra.container_type) orelse return .generic_poison_type;
const aggregate_ty = wrapped_aggregate_ty.optEuBaseType(zcu);
const zir_field_name = sema.code.nullTerminatedString(extra.name_start);
- const field_name = try ip.getOrPutString(sema.gpa, pt.tid, zir_field_name, .no_embedded_nulls);
+ const field_name = try ip.getOrPutString(gpa, io, pt.tid, zir_field_name, .no_embedded_nulls);
return sema.fieldType(block, aggregate_ty, field_name, field_name_src, ty_src);
}
@@ -20669,6 +20836,9 @@ fn zirReifyTuple(
) CompileError!Air.Inst.Ref {
const pt = sema.pt;
const zcu = pt.zcu;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
const operand_src = block.builtinCallArgSrc(extra.node, 0);
@@ -20691,7 +20861,7 @@ fn zirReifyTuple(
const field_values = try sema.arena.alloc(InternPool.Index, fields_len);
@memset(field_values, .none);
- return .fromIntern(try zcu.intern_pool.getTupleType(zcu.gpa, pt.tid, .{
+ return .fromIntern(try zcu.intern_pool.getTupleType(gpa, io, pt.tid, .{
.types = field_types,
.values = field_values,
}));
@@ -20704,7 +20874,9 @@ fn zirReifyPointer(
) CompileError!Air.Inst.Ref {
const pt = sema.pt;
const zcu = pt.zcu;
- const gpa = zcu.gpa;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const ip = &zcu.intern_pool;
const extra = sema.code.extraData(Zir.Inst.ReifyPointer, extended.operand).data;
@@ -20772,7 +20944,7 @@ fn zirReifyPointer(
}
try checkSentinelType(sema, block, sentinel_src, elem_ty);
if (sentinel.canMutateComptimeVarState(zcu)) {
- const sentinel_name = try ip.getOrPutString(gpa, pt.tid, "sentinel", .no_embedded_nulls);
+ const sentinel_name = try ip.getOrPutString(gpa, io, pt.tid, "sentinel", .no_embedded_nulls);
return sema.failWithContainsReferenceToComptimeVar(block, sentinel_src, sentinel_name, "sentinel", sentinel);
}
}
@@ -20801,7 +20973,9 @@ fn zirReifyFn(
) CompileError!Air.Inst.Ref {
const pt = sema.pt;
const zcu = pt.zcu;
- const gpa = zcu.gpa;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const ip = &zcu.intern_pool;
const extra = sema.code.extraData(Zir.Inst.ReifyFn, extended.operand).data;
@@ -20884,7 +21058,7 @@ fn zirReifyFn(
return sema.fail(block, param_attrs_src, "cannot reify function type with comptime-only return type '{f}'", .{ret_ty.fmt(pt)});
}
- return .fromIntern(try ip.getFuncType(gpa, pt.tid, .{
+ return .fromIntern(try ip.getFuncType(gpa, io, pt.tid, .{
.param_types = param_types_ip,
.noalias_bits = noalias_bits,
.comptime_bits = 0,
@@ -20904,7 +21078,9 @@ fn zirReifyStruct(
) CompileError!Air.Inst.Ref {
const pt = sema.pt;
const zcu = pt.zcu;
- const gpa = sema.gpa;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const ip = &zcu.intern_pool;
const name_strategy: Zir.Inst.NameStrategy = @enumFromInt(extended.small);
@@ -21079,7 +21255,7 @@ fn zirReifyStruct(
return sema.fail(block, field_attrs_src, "{t} struct fields cannot be marked comptime", .{layout});
}
- const wip_ty = switch (try ip.getStructType(gpa, pt.tid, .{
+ const wip_ty = switch (try ip.getStructType(gpa, io, pt.tid, .{
.layout = layout,
.fields_len = @intCast(fields_len),
.known_non_opv = false,
@@ -21223,10 +21399,10 @@ fn zirReifyStruct(
}
if (backing_int_ty) |ty| {
try sema.checkBackingIntType(block, src, ty, fields_bit_sum);
- wip_struct_type.setBackingIntType(ip, ty.toIntern());
+ wip_struct_type.setBackingIntType(ip, io, ty.toIntern());
} else {
const ty = try pt.intType(.unsigned, @intCast(fields_bit_sum));
- wip_struct_type.setBackingIntType(ip, ty.toIntern());
+ wip_struct_type.setBackingIntType(ip, io, ty.toIntern());
}
}
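// Editorial sketch: a packed struct's backing integer is either the
// user-supplied type (checked against the summed field bits) or an unsigned
// integer exactly that wide; both paths record it through the Io-aware
// setter:
const backing_ty = try pt.intType(.unsigned, @intCast(fields_bit_sum));
wip_struct_type.setBackingIntType(ip, io, backing_ty.toIntern());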
@@ -21259,7 +21435,9 @@ fn zirReifyUnion(
) CompileError!Air.Inst.Ref {
const pt = sema.pt;
const zcu = pt.zcu;
- const gpa = sema.gpa;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const ip = &zcu.intern_pool;
const name_strategy: Zir.Inst.NameStrategy = @enumFromInt(extended.small);
@@ -21400,7 +21578,7 @@ fn zirReifyUnion(
return sema.fail(block, field_attrs_src, "packed union fields cannot be aligned", .{});
}
- const wip_ty = switch (try ip.getUnionType(gpa, pt.tid, .{
+ const wip_ty = switch (try ip.getUnionType(gpa, io, pt.tid, .{
.flags = .{
.layout = layout,
.status = .none,
@@ -21558,8 +21736,8 @@ fn zirReifyUnion(
}
}
- loaded_union.setTagType(ip, enum_tag_ty);
- loaded_union.setStatus(ip, .have_field_types);
+ loaded_union.setTagType(ip, io, enum_tag_ty);
+ loaded_union.setStatus(ip, io, .have_field_types);
const new_namespace_index = try pt.createNamespace(.{
.parent = block.namespace.toOptional(),
@@ -21590,7 +21768,9 @@ fn zirReifyEnum(
) CompileError!Air.Inst.Ref {
const pt = sema.pt;
const zcu = pt.zcu;
- const gpa = sema.gpa;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const ip = &zcu.intern_pool;
const name_strategy: Zir.Inst.NameStrategy = @enumFromInt(extended.small);
@@ -21688,7 +21868,7 @@ fn zirReifyEnum(
std.hash.autoHash(&hasher, field_name);
}
- const wip_ty = switch (try ip.getEnumType(gpa, pt.tid, .{
+ const wip_ty = switch (try ip.getEnumType(gpa, io, pt.tid, .{
.has_values = true,
.tag_mode = if (nonexhaustive) .nonexhaustive else .explicit,
.fields_len = @intCast(fields_len),
@@ -21844,13 +22024,16 @@ fn zirCVaStart(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData)
fn zirTypeName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const pt = sema.pt;
const zcu = pt.zcu;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const ip = &zcu.intern_pool;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
const ty_src = block.builtinCallArgSrc(inst_data.src_node, 0);
const ty = try sema.resolveType(block, ty_src, inst_data.operand);
- const type_name = try ip.getOrPutStringFmt(sema.gpa, pt.tid, "{f}", .{ty.fmt(pt)}, .no_embedded_nulls);
+ const type_name = try ip.getOrPutStringFmt(gpa, io, pt.tid, "{f}", .{ty.fmt(pt)}, .no_embedded_nulls);
return sema.addNullTerminatedStrLit(type_name);
}
@@ -22281,6 +22464,10 @@ fn ptrCastFull(
) CompileError!Air.Inst.Ref {
const pt = sema.pt;
const zcu = pt.zcu;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
+
const operand_ty = sema.typeOf(operand);
try sema.checkPtrType(block, src, dest_ty, true);
@@ -22452,14 +22639,14 @@ fn ptrCastFull(
if (dest_info.sentinel == .none) break :check_sent;
if (src_info.flags.size == .c) break :check_sent;
if (src_info.sentinel != .none) {
- const coerced_sent = try zcu.intern_pool.getCoerced(sema.gpa, pt.tid, src_info.sentinel, dest_info.child);
+ const coerced_sent = try zcu.intern_pool.getCoerced(gpa, io, pt.tid, src_info.sentinel, dest_info.child);
if (dest_info.sentinel == coerced_sent) break :check_sent;
}
if (is_array_ptr_to_slice) {
// [*]nT -> []T
const arr_ty: Type = .fromInterned(src_info.child);
if (arr_ty.sentinel(zcu)) |src_sentinel| {
- const coerced_sent = try zcu.intern_pool.getCoerced(sema.gpa, pt.tid, src_sentinel.toIntern(), dest_info.child);
+ const coerced_sent = try zcu.intern_pool.getCoerced(gpa, io, pt.tid, src_sentinel.toIntern(), dest_info.child);
if (dest_info.sentinel == coerced_sent) break :check_sent;
}
}
@@ -23577,8 +23764,11 @@ fn resolveExportOptions(
) CompileError!Zcu.Export.Options {
const pt = sema.pt;
const zcu = pt.zcu;
- const gpa = sema.gpa;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const ip = &zcu.intern_pool;
+
const export_options_ty = try sema.getBuiltinType(src, .ExportOptions);
const air_ref = try sema.resolveInst(zir_ref);
const options = try sema.coerce(block, export_options_ty, air_ref, src);
@@ -23588,21 +23778,21 @@ fn resolveExportOptions(
const section_src = block.src(.{ .init_field_section = src.offset.node_offset_builtin_call_arg.builtin_call_node });
const visibility_src = block.src(.{ .init_field_visibility = src.offset.node_offset_builtin_call_arg.builtin_call_node });
- const name_operand = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, pt.tid, "name", .no_embedded_nulls), name_src);
+ const name_operand = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, io, pt.tid, "name", .no_embedded_nulls), name_src);
const name = try sema.toConstString(block, name_src, name_operand, .{ .simple = .export_options });
- const linkage_operand = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, pt.tid, "linkage", .no_embedded_nulls), linkage_src);
+ const linkage_operand = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, io, pt.tid, "linkage", .no_embedded_nulls), linkage_src);
const linkage_val = try sema.resolveConstDefinedValue(block, linkage_src, linkage_operand, .{ .simple = .export_options });
const linkage = try sema.interpretBuiltinType(block, linkage_src, linkage_val, std.builtin.GlobalLinkage);
- const section_operand = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, pt.tid, "section", .no_embedded_nulls), section_src);
+ const section_operand = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, io, pt.tid, "section", .no_embedded_nulls), section_src);
const section_opt_val = try sema.resolveConstDefinedValue(block, section_src, section_operand, .{ .simple = .export_options });
const section = if (section_opt_val.optionalValue(zcu)) |section_val|
try sema.toConstString(block, section_src, Air.internedToRef(section_val.toIntern()), .{ .simple = .export_options })
else
null;
- const visibility_operand = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, pt.tid, "visibility", .no_embedded_nulls), visibility_src);
+ const visibility_operand = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, io, pt.tid, "visibility", .no_embedded_nulls), visibility_src);
const visibility_val = try sema.resolveConstDefinedValue(block, visibility_src, visibility_operand, .{ .simple = .export_options });
const visibility = try sema.interpretBuiltinType(block, visibility_src, visibility_val, std.builtin.SymbolVisibility);
@@ -23617,9 +23807,9 @@ fn resolveExportOptions(
}
return .{
- .name = try ip.getOrPutString(gpa, pt.tid, name, .no_embedded_nulls),
+ .name = try ip.getOrPutString(gpa, io, pt.tid, name, .no_embedded_nulls),
.linkage = linkage,
- .section = try ip.getOrPutStringOpt(gpa, pt.tid, section, .no_embedded_nulls),
+ .section = try ip.getOrPutStringOpt(gpa, io, pt.tid, section, .no_embedded_nulls),
.visibility = visibility,
};
}
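// Editorial sketch of the decoding pattern above: each field of the comptime
// options value is fetched by interning the field name and then resolving
// the field value to a defined constant, e.g. for `linkage`:
const linkage_name = try ip.getOrPutString(gpa, io, pt.tid, "linkage", .no_embedded_nulls);
const linkage_operand = try sema.fieldVal(block, src, options, linkage_name, linkage_src);
_ = linkage_operand;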
@@ -25345,8 +25535,11 @@ fn zirMemcpy(
fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
const pt = sema.pt;
const zcu = pt.zcu;
- const gpa = sema.gpa;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const ip = &zcu.intern_pool;
+
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const src = block.nodeOffset(inst_data.src_node);
@@ -25385,7 +25578,7 @@ fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
const elem = try sema.coerce(block, dest_elem_ty, uncoerced_elem, value_src);
const runtime_src = rs: {
- const len_air_ref = try sema.fieldVal(block, src, dest_ptr, try ip.getOrPutString(gpa, pt.tid, "len", .no_embedded_nulls), dest_src);
+ const len_air_ref = try sema.fieldVal(block, src, dest_ptr, try ip.getOrPutString(gpa, io, pt.tid, "len", .no_embedded_nulls), dest_src);
const len_val = (try sema.resolveDefinedValue(block, dest_src, len_air_ref)) orelse break :rs dest_src;
const len_u64 = try len_val.toUnsignedIntSema(pt);
const len = try sema.usizeCast(block, dest_src, len_u64);
@@ -25438,7 +25631,11 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
const pt = sema.pt;
const zcu = pt.zcu;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const ip = &zcu.intern_pool;
+
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const extra = sema.code.extraData(Zir.Inst.FuncFancy, inst_data.payload_index);
const target = zcu.getTarget();
@@ -25482,7 +25679,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
block,
LazySrcLoc.unneeded,
cc_type.getNamespaceIndex(zcu),
- try ip.getOrPutString(sema.gpa, pt.tid, "c", .no_embedded_nulls),
+ try ip.getOrPutString(gpa, io, pt.tid, "c", .no_embedded_nulls),
);
// The above should have errored.
@panic("std.builtin is corrupt");
@@ -25648,8 +25845,11 @@ fn resolvePrefetchOptions(
) CompileError!std.builtin.PrefetchOptions {
const pt = sema.pt;
const zcu = pt.zcu;
- const gpa = sema.gpa;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const ip = &zcu.intern_pool;
+
const options_ty = try sema.getBuiltinType(src, .PrefetchOptions);
const options = try sema.coerce(block, options_ty, try sema.resolveInst(zir_ref), src);
@@ -25657,13 +25857,13 @@ fn resolvePrefetchOptions(
const locality_src = block.src(.{ .init_field_locality = src.offset.node_offset_builtin_call_arg.builtin_call_node });
const cache_src = block.src(.{ .init_field_cache = src.offset.node_offset_builtin_call_arg.builtin_call_node });
- const rw = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, pt.tid, "rw", .no_embedded_nulls), rw_src);
+ const rw = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, io, pt.tid, "rw", .no_embedded_nulls), rw_src);
const rw_val = try sema.resolveConstDefinedValue(block, rw_src, rw, .{ .simple = .prefetch_options });
- const locality = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, pt.tid, "locality", .no_embedded_nulls), locality_src);
+ const locality = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, io, pt.tid, "locality", .no_embedded_nulls), locality_src);
const locality_val = try sema.resolveConstDefinedValue(block, locality_src, locality, .{ .simple = .prefetch_options });
- const cache = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, pt.tid, "cache", .no_embedded_nulls), cache_src);
+ const cache = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, io, pt.tid, "cache", .no_embedded_nulls), cache_src);
const cache_val = try sema.resolveConstDefinedValue(block, cache_src, cache, .{ .simple = .prefetch_options });
return std.builtin.PrefetchOptions{
@@ -25717,8 +25917,11 @@ fn resolveExternOptions(
} {
const pt = sema.pt;
const zcu = pt.zcu;
- const gpa = sema.gpa;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const ip = &zcu.intern_pool;
+
const options_inst = try sema.resolveInst(zir_ref);
const extern_options_ty = try sema.getBuiltinType(src, .ExternOptions);
const options = try sema.coerce(block, extern_options_ty, options_inst, src);
@@ -25731,21 +25934,21 @@ fn resolveExternOptions(
const dll_import_src = block.src(.{ .init_field_dll_import = src.offset.node_offset_builtin_call_arg.builtin_call_node });
const relocation_src = block.src(.{ .init_field_relocation = src.offset.node_offset_builtin_call_arg.builtin_call_node });
- const name_ref = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, pt.tid, "name", .no_embedded_nulls), name_src);
+ const name_ref = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, io, pt.tid, "name", .no_embedded_nulls), name_src);
const name = try sema.toConstString(block, name_src, name_ref, .{ .simple = .extern_options });
- const library_name_inst = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, pt.tid, "library_name", .no_embedded_nulls), library_src);
+ const library_name_inst = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, io, pt.tid, "library_name", .no_embedded_nulls), library_src);
const library_name_val = try sema.resolveConstDefinedValue(block, library_src, library_name_inst, .{ .simple = .extern_options });
- const linkage_ref = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, pt.tid, "linkage", .no_embedded_nulls), linkage_src);
+ const linkage_ref = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, io, pt.tid, "linkage", .no_embedded_nulls), linkage_src);
const linkage_val = try sema.resolveConstDefinedValue(block, linkage_src, linkage_ref, .{ .simple = .extern_options });
const linkage = try sema.interpretBuiltinType(block, linkage_src, linkage_val, std.builtin.GlobalLinkage);
- const visibility_ref = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, pt.tid, "visibility", .no_embedded_nulls), visibility_src);
+ const visibility_ref = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, io, pt.tid, "visibility", .no_embedded_nulls), visibility_src);
const visibility_val = try sema.resolveConstDefinedValue(block, visibility_src, visibility_ref, .{ .simple = .extern_options });
const visibility = try sema.interpretBuiltinType(block, visibility_src, visibility_val, std.builtin.SymbolVisibility);
- const is_thread_local = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, pt.tid, "is_thread_local", .no_embedded_nulls), thread_local_src);
+ const is_thread_local = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, io, pt.tid, "is_thread_local", .no_embedded_nulls), thread_local_src);
const is_thread_local_val = try sema.resolveConstDefinedValue(block, thread_local_src, is_thread_local, .{ .simple = .extern_options });
const library_name = if (library_name_val.optionalValue(zcu)) |library_name_payload| library_name: {
@@ -25757,10 +25960,10 @@ fn resolveExternOptions(
break :library_name library_name;
} else null;
- const is_dll_import_ref = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, pt.tid, "is_dll_import", .no_embedded_nulls), dll_import_src);
+ const is_dll_import_ref = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, io, pt.tid, "is_dll_import", .no_embedded_nulls), dll_import_src);
const is_dll_import_val = try sema.resolveConstDefinedValue(block, dll_import_src, is_dll_import_ref, .{ .simple = .extern_options });
- const relocation_ref = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, pt.tid, "relocation", .no_embedded_nulls), relocation_src);
+ const relocation_ref = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, io, pt.tid, "relocation", .no_embedded_nulls), relocation_src);
const relocation_val = try sema.resolveConstDefinedValue(block, relocation_src, relocation_ref, .{ .simple = .extern_options });
const relocation = try sema.interpretBuiltinType(block, relocation_src, relocation_val, std.builtin.ExternOptions.Relocation);
@@ -25773,8 +25976,8 @@ fn resolveExternOptions(
}
return .{
- .name = try ip.getOrPutString(gpa, pt.tid, name, .no_embedded_nulls),
- .library_name = try ip.getOrPutStringOpt(gpa, pt.tid, library_name, .no_embedded_nulls),
+ .name = try ip.getOrPutString(gpa, io, pt.tid, name, .no_embedded_nulls),
+ .library_name = try ip.getOrPutStringOpt(gpa, io, pt.tid, library_name, .no_embedded_nulls),
.linkage = linkage,
.visibility = visibility,
.is_thread_local = is_thread_local_val.toBool(),
@@ -25919,7 +26122,9 @@ fn zirInComptime(
fn zirBuiltinValue(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
const pt = sema.pt;
const zcu = pt.zcu;
- const gpa = zcu.gpa;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const ip = &zcu.intern_pool;
const src_node: std.zig.Ast.Node.Offset = @enumFromInt(@as(i32, @bitCast(extended.operand)));
@@ -25955,7 +26160,7 @@ fn zirBuiltinValue(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD
block,
src,
callconv_ty.getNamespaceIndex(zcu),
- try ip.getOrPutString(gpa, pt.tid, "c", .no_embedded_nulls),
+ try ip.getOrPutString(gpa, io, pt.tid, "c", .no_embedded_nulls),
) orelse @panic("std.builtin is corrupt");
},
.calling_convention_inline => {
@@ -26492,11 +26697,12 @@ fn preparePanicId(sema: *Sema, src: LazySrcLoc, panic_id: Zcu.SimplePanicId) !vo
fn getPanicIdFunc(sema: *Sema, src: LazySrcLoc, panic_id: Zcu.SimplePanicId) !InternPool.Index {
const zcu = sema.pt.zcu;
+ const io = zcu.comp.io;
try sema.ensureMemoizedStateResolved(src, .panic);
const panic_fn_index = zcu.builtin_decl_values.get(panic_id.toBuiltin());
switch (sema.owner.unwrap()) {
.@"comptime", .nav_ty, .nav_val, .type, .memoized_state => {},
- .func => |owner_func| zcu.intern_pool.funcSetHasErrorTrace(owner_func, true),
+ .func => |owner_func| zcu.intern_pool.funcSetHasErrorTrace(io, owner_func, true),
}
return panic_fn_index;
}
@@ -28539,10 +28745,15 @@ fn coerceExtra(
inst_src: LazySrcLoc,
opts: CoerceOpts,
) CoersionError!Air.Inst.Ref {
- if (dest_ty.isGenericPoison()) return inst;
const pt = sema.pt;
const zcu = pt.zcu;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const ip = &zcu.intern_pool;
+
+ if (dest_ty.isGenericPoison()) return inst;
+
const dest_ty_src = inst_src; // TODO better source location
try dest_ty.resolveFields(pt);
const inst_ty = sema.typeOf(inst);
@@ -28904,7 +29115,7 @@ fn coerceExtra(
return switch (zcu.intern_pool.indexToKey(val.toIntern())) {
.undef => try pt.undefRef(dest_ty),
.int => |int| Air.internedToRef(
- try zcu.intern_pool.getCoercedInts(zcu.gpa, pt.tid, int, dest_ty.toIntern()),
+ try zcu.intern_pool.getCoercedInts(gpa, io, pt.tid, int, dest_ty.toIntern()),
),
else => unreachable,
};
@@ -30070,6 +30281,10 @@ fn coerceInMemoryAllowedPtrs(
) !InMemoryCoercionResult {
const pt = sema.pt;
const zcu = pt.zcu;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
+
const dest_info = dest_ptr_ty.ptrInfo(zcu);
const src_info = src_ptr_ty.ptrInfo(zcu);
@@ -30175,7 +30390,7 @@ fn coerceInMemoryAllowedPtrs(
const ds = dest_info.sentinel;
if (ss == .none and ds == .none) break :ok true;
if (ss != .none and ds != .none) {
- if (ds == try zcu.intern_pool.getCoerced(sema.gpa, pt.tid, ss, dest_info.child)) break :ok true;
+ if (ds == try zcu.intern_pool.getCoerced(gpa, io, pt.tid, ss, dest_info.child)) break :ok true;
}
if (src_info.flags.size == .c) break :ok true;
if (!dest_is_mut and dest_info.sentinel == .none) break :ok true;
@@ -33086,6 +33301,9 @@ fn resolvePeerTypesInner(
) !PeerResolveResult {
const pt = sema.pt;
const zcu = pt.zcu;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const ip = &zcu.intern_pool;
var strat_reason: usize = 0;
@@ -33412,8 +33630,8 @@ fn resolvePeerTypesInner(
}).toIntern();
if (ptr_info.sentinel != .none and peer_info.sentinel != .none) {
- const peer_sent = try ip.getCoerced(sema.gpa, pt.tid, ptr_info.sentinel, ptr_info.child);
- const ptr_sent = try ip.getCoerced(sema.gpa, pt.tid, peer_info.sentinel, ptr_info.child);
+ const peer_sent = try ip.getCoerced(gpa, io, pt.tid, ptr_info.sentinel, ptr_info.child);
+ const ptr_sent = try ip.getCoerced(gpa, io, pt.tid, peer_info.sentinel, ptr_info.child);
if (ptr_sent == peer_sent) {
ptr_info.sentinel = ptr_sent;
} else {
@@ -33715,8 +33933,8 @@ fn resolvePeerTypesInner(
no_sentinel: {
if (peer_sentinel == .none) break :no_sentinel;
if (cur_sentinel == .none) break :no_sentinel;
- const peer_sent_coerced = try ip.getCoerced(sema.gpa, pt.tid, peer_sentinel, sentinel_ty);
- const cur_sent_coerced = try ip.getCoerced(sema.gpa, pt.tid, cur_sentinel, sentinel_ty);
+ const peer_sent_coerced = try ip.getCoerced(gpa, io, pt.tid, peer_sentinel, sentinel_ty);
+ const cur_sent_coerced = try ip.getCoerced(gpa, io, pt.tid, cur_sentinel, sentinel_ty);
if (peer_sent_coerced != cur_sent_coerced) break :no_sentinel;
// Sentinels match
if (ptr_info.flags.size == .one) switch (ip.indexToKey(ptr_info.child)) {
@@ -34081,7 +34299,7 @@ fn resolvePeerTypesInner(
else => |result| {
const result_buf = try sema.arena.create(PeerResolveResult);
result_buf.* = result;
- const field_name = try ip.getOrPutStringFmt(sema.gpa, pt.tid, "{d}", .{field_index}, .no_embedded_nulls);
+ const field_name = try ip.getOrPutStringFmt(gpa, io, pt.tid, "{d}", .{field_index}, .no_embedded_nulls);
// The error info needs the field types, but we can't reuse sub_peer_tys
// since the recursive call may have clobbered it.
@@ -34136,7 +34354,7 @@ fn resolvePeerTypesInner(
field_val.* = if (comptime_val) |v| v.toIntern() else .none;
}
- const final_ty = try ip.getTupleType(zcu.gpa, pt.tid, .{
+ const final_ty = try ip.getTupleType(gpa, io, pt.tid, .{
.types = field_types,
.values = field_vals,
});
@@ -34274,6 +34492,7 @@ pub fn resolveStructAlignment(
) SemaError!void {
const pt = sema.pt;
const zcu = pt.zcu;
+ const io = zcu.comp.io;
const ip = &zcu.intern_pool;
const target = zcu.getTarget();
@@ -34287,15 +34506,15 @@ pub fn resolveStructAlignment(
// We'll guess "pointer-aligned", if the struct has an
// underaligned pointer field then some allocations
// might require explicit alignment.
- if (struct_type.assumePointerAlignedIfFieldTypesWip(ip, ptr_align)) return;
+ if (struct_type.assumePointerAlignedIfFieldTypesWip(ip, io, ptr_align)) return;
try sema.resolveStructFieldTypes(ty, struct_type);
// We'll guess "pointer-aligned", if the struct has an
// underaligned pointer field then some allocations
// might require explicit alignment.
- if (struct_type.assumePointerAlignedIfWip(ip, ptr_align)) return;
- defer struct_type.clearAlignmentWip(ip);
+ if (struct_type.assumePointerAlignedIfWip(ip, io, ptr_align)) return;
+ defer struct_type.clearAlignmentWip(ip, io);
// No `zcu.trackUnitSema` calls, since this phase isn't really doing any semantic analysis.
// It's just triggering *other* analysis, alongside a simple loop over already-resolved info.
@@ -34314,13 +34533,14 @@ pub fn resolveStructAlignment(
alignment = alignment.maxStrict(field_align);
}
- struct_type.setAlignment(ip, alignment);
+ struct_type.setAlignment(ip, io, alignment);
}
pub fn resolveStructLayout(sema: *Sema, ty: Type) SemaError!void {
const pt = sema.pt;
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
+ const io = zcu.comp.io;
const struct_type = zcu.typeToStruct(ty) orelse return;
assert(sema.owner.unwrap().type == ty.toIntern());
@@ -34341,7 +34561,7 @@ pub fn resolveStructLayout(sema: *Sema, ty: Type) SemaError!void {
return;
}
- if (struct_type.setLayoutWip(ip)) {
+ if (struct_type.setLayoutWip(ip, io)) {
const msg = try sema.errMsg(
ty.srcLoc(zcu),
"struct '{f}' depends on itself",
@@ -34349,7 +34569,7 @@ pub fn resolveStructLayout(sema: *Sema, ty: Type) SemaError!void {
);
return sema.failWithOwnedErrorMsg(null, msg);
}
- defer struct_type.clearLayoutWip(ip);
+ defer struct_type.clearLayoutWip(ip, io);
const aligns = try sema.arena.alloc(Alignment, struct_type.field_types.len);
const sizes = try sema.arena.alloc(u64, struct_type.field_types.len);
@@ -34468,7 +34688,7 @@ pub fn resolveStructLayout(sema: *Sema, ty: Type) SemaError!void {
);
return sema.failWithOwnedErrorMsg(null, msg);
};
- struct_type.setLayoutResolved(ip, size, big_align);
+ struct_type.setLayoutResolved(ip, io, size, big_align);
_ = try ty.comptimeOnlySema(pt);
}
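// Editorial sketch: layout resolution is bracketed by a WIP flag so that a
// self-dependent layout is reported as "depends on itself" rather than
// recursing forever; the flag transitions are now Io-aware end to end:
if (struct_type.setLayoutWip(ip, io)) {
    // already in progress: report the dependency loop (as above)
}
defer struct_type.clearLayoutWip(ip, io);
// ... compute field sizes and alignments ...
struct_type.setLayoutResolved(ip, io, size, big_align);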
@@ -34478,7 +34698,9 @@ fn backingIntType(
) CompileError!void {
const pt = sema.pt;
const zcu = pt.zcu;
- const gpa = zcu.gpa;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const ip = &zcu.intern_pool;
var analysis_arena = std.heap.ArenaAllocator.init(gpa);
@@ -34546,13 +34768,13 @@ fn backingIntType(
};
try sema.checkBackingIntType(&block, backing_int_src, backing_int_ty, fields_bit_sum);
- struct_type.setBackingIntType(ip, backing_int_ty.toIntern());
+ struct_type.setBackingIntType(ip, io, backing_int_ty.toIntern());
} else {
if (fields_bit_sum > std.math.maxInt(u16)) {
return sema.fail(&block, block.nodeOffset(.zero), "size of packed struct '{d}' exceeds maximum bit width of 65535", .{fields_bit_sum});
}
const backing_int_ty = try pt.intType(.unsigned, @intCast(fields_bit_sum));
- struct_type.setBackingIntType(ip, backing_int_ty.toIntern());
+ struct_type.setBackingIntType(ip, io, backing_int_ty.toIntern());
}
try sema.flushExports();
@@ -34620,6 +34842,7 @@ pub fn resolveUnionAlignment(
) SemaError!void {
const pt = sema.pt;
const zcu = pt.zcu;
+ const io = zcu.comp.io;
const ip = &zcu.intern_pool;
const target = zcu.getTarget();
@@ -34632,7 +34855,7 @@ pub fn resolveUnionAlignment(
// We'll guess "pointer-aligned", if the union has an
// underaligned pointer field then some allocations
// might require explicit alignment.
- if (union_type.assumePointerAlignedIfFieldTypesWip(ip, ptr_align)) return;
+ if (union_type.assumePointerAlignedIfFieldTypesWip(ip, io, ptr_align)) return;
try sema.resolveUnionFieldTypes(ty, union_type);
@@ -34653,12 +34876,13 @@ pub fn resolveUnionAlignment(
max_align = max_align.max(field_align);
}
- union_type.setAlignment(ip, max_align);
+ union_type.setAlignment(ip, io, max_align);
}
/// This logic must be kept in sync with `Type.getUnionLayout`.
pub fn resolveUnionLayout(sema: *Sema, ty: Type) SemaError!void {
const pt = sema.pt;
+ const io = pt.zcu.comp.io;
const ip = &pt.zcu.intern_pool;
try sema.resolveUnionFieldTypes(ty, ip.loadUnionType(ty.ip_index));
@@ -34682,9 +34906,9 @@ pub fn resolveUnionLayout(sema: *Sema, ty: Type) SemaError!void {
.have_layout, .fully_resolved_wip, .fully_resolved => return,
}
- errdefer union_type.setStatusIfLayoutWip(ip, old_flags.status);
+ errdefer union_type.setStatusIfLayoutWip(ip, io, old_flags.status);
- union_type.setStatus(ip, .layout_wip);
+ union_type.setStatus(ip, io, .layout_wip);
// No `zcu.trackUnitSema` calls, since this phase isn't really doing any semantic analysis.
// It's just triggering *other* analysis, alongside a simple loop over already-resolved info.
@@ -34765,7 +34989,7 @@ pub fn resolveUnionLayout(sema: *Sema, ty: Type) SemaError!void {
);
return sema.failWithOwnedErrorMsg(null, msg);
};
- union_type.setHaveLayout(ip, casted_size, padding, alignment);
+ union_type.setHaveLayout(ip, io, casted_size, padding, alignment);
if (union_type.flagsUnordered(ip).assumed_runtime_bits and !(try ty.hasRuntimeBitsSema(pt))) {
const msg = try sema.errMsg(
@@ -34797,13 +35021,14 @@ pub fn resolveStructFully(sema: *Sema, ty: Type) SemaError!void {
const pt = sema.pt;
const zcu = pt.zcu;
+ const io = zcu.comp.io;
const ip = &zcu.intern_pool;
const struct_type = zcu.typeToStruct(ty).?;
assert(sema.owner.unwrap().type == ty.toIntern());
- if (struct_type.setFullyResolved(ip)) return;
- errdefer struct_type.clearFullyResolved(ip);
+ if (struct_type.setFullyResolved(ip, io)) return;
+ errdefer struct_type.clearFullyResolved(ip, io);
// No `zcu.trackUnitSema` calls, since this phase isn't really doing any semantic analysis.
// It's just triggering *other* analysis, alongside a simple loop over already-resolved info.
@@ -34823,6 +35048,7 @@ pub fn resolveUnionFully(sema: *Sema, ty: Type) SemaError!void {
const pt = sema.pt;
const zcu = pt.zcu;
+ const io = zcu.comp.io;
const ip = &zcu.intern_pool;
const union_obj = zcu.typeToUnion(ty).?;
@@ -34841,14 +35067,14 @@ pub fn resolveUnionFully(sema: *Sema, ty: Type) SemaError!void {
// make sure pointer fields get their child types resolved as well.
// See also similar code for structs.
const prev_status = union_obj.flagsUnordered(ip).status;
- errdefer union_obj.setStatus(ip, prev_status);
+ errdefer union_obj.setStatus(ip, io, prev_status);
- union_obj.setStatus(ip, .fully_resolved_wip);
+ union_obj.setStatus(ip, io, .fully_resolved_wip);
for (0..union_obj.field_types.len) |field_index| {
const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[field_index]);
try field_ty.resolveFully(pt);
}
- union_obj.setStatus(ip, .fully_resolved);
+ union_obj.setStatus(ip, io, .fully_resolved);
}
// And let's not forget comptime-only status.
@@ -34862,13 +35088,14 @@ pub fn resolveStructFieldTypes(
) SemaError!void {
const pt = sema.pt;
const zcu = pt.zcu;
+ const io = zcu.comp.io;
const ip = &zcu.intern_pool;
assert(sema.owner.unwrap().type == ty);
if (struct_type.haveFieldTypes(ip)) return;
- if (struct_type.setFieldTypesWip(ip)) {
+ if (struct_type.setFieldTypesWip(ip, io)) {
const msg = try sema.errMsg(
Type.fromInterned(ty).srcLoc(zcu),
"struct '{f}' depends on itself",
@@ -34876,7 +35103,7 @@ pub fn resolveStructFieldTypes(
);
return sema.failWithOwnedErrorMsg(null, msg);
}
- defer struct_type.clearFieldTypesWip(ip);
+ defer struct_type.clearFieldTypesWip(ip, io);
// can't happen earlier than this because we only want the progress node if not already resolved
const tracked_unit = zcu.trackUnitSema(struct_type.name.toSlice(ip), null);
@@ -34891,6 +35118,7 @@ pub fn resolveStructFieldTypes(
pub fn resolveStructFieldInits(sema: *Sema, ty: Type) SemaError!void {
const pt = sema.pt;
const zcu = pt.zcu;
+ const io = zcu.comp.io;
const ip = &zcu.intern_pool;
const struct_type = zcu.typeToStruct(ty) orelse return;
@@ -34901,7 +35129,7 @@ pub fn resolveStructFieldInits(sema: *Sema, ty: Type) SemaError!void {
try sema.resolveStructLayout(ty);
- if (struct_type.setInitsWip(ip)) {
+ if (struct_type.setInitsWip(ip, io)) {
const msg = try sema.errMsg(
ty.srcLoc(zcu),
"struct '{f}' depends on itself",
@@ -34909,7 +35137,7 @@ pub fn resolveStructFieldInits(sema: *Sema, ty: Type) SemaError!void {
);
return sema.failWithOwnedErrorMsg(null, msg);
}
- defer struct_type.clearInitsWip(ip);
+ defer struct_type.clearInitsWip(ip, io);
// can't happen earlier than this because we only want the progress node if not already resolved
const tracked_unit = zcu.trackUnitSema(struct_type.name.toSlice(ip), null);
@@ -34919,12 +35147,13 @@ pub fn resolveStructFieldInits(sema: *Sema, ty: Type) SemaError!void {
error.AnalysisFail, error.OutOfMemory, error.Canceled => |e| return e,
error.ComptimeBreak, error.ComptimeReturn => unreachable,
};
- struct_type.setHaveFieldInits(ip);
+ struct_type.setHaveFieldInits(ip, io);
}
pub fn resolveUnionFieldTypes(sema: *Sema, ty: Type, union_type: InternPool.LoadedUnionType) SemaError!void {
const pt = sema.pt;
const zcu = pt.zcu;
+ const io = zcu.comp.io;
const ip = &zcu.intern_pool;
assert(sema.owner.unwrap().type == ty.toIntern());
@@ -34947,13 +35176,13 @@ pub fn resolveUnionFieldTypes(sema: *Sema, ty: Type, union_type: InternPool.Load
const tracked_unit = zcu.trackUnitSema(union_type.name.toSlice(ip), null);
defer tracked_unit.end(zcu);
- union_type.setStatus(ip, .field_types_wip);
- errdefer union_type.setStatus(ip, .none);
+ union_type.setStatus(ip, io, .field_types_wip);
+ errdefer union_type.setStatus(ip, io, .none);
sema.unionFields(ty.toIntern(), union_type) catch |err| switch (err) {
error.AnalysisFail, error.OutOfMemory, error.Canceled => |e| return e,
error.ComptimeBreak, error.ComptimeReturn => unreachable,
};
- union_type.setStatus(ip, .have_field_types);
+ union_type.setStatus(ip, io, .have_field_types);
}
/// Returns a normal error set corresponding to the fully populated inferred
@@ -35055,11 +35284,14 @@ fn resolveAdHocInferredErrorSet(
) CompileError!InternPool.Index {
const pt = sema.pt;
const zcu = pt.zcu;
- const gpa = sema.gpa;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const ip = &zcu.intern_pool;
+
const new_ty = try resolveAdHocInferredErrorSetTy(sema, block, src, ip.typeOf(value));
if (new_ty == .none) return value;
- return ip.getCoerced(gpa, pt.tid, value, new_ty);
+ return ip.getCoerced(gpa, io, pt.tid, value, new_ty);
}
fn resolveAdHocInferredErrorSetTy(
@@ -35159,8 +35391,11 @@ fn structFields(
) CompileError!void {
const pt = sema.pt;
const zcu = pt.zcu;
- const gpa = zcu.gpa;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const ip = &zcu.intern_pool;
+
const namespace_index = struct_type.namespace;
const zir = zcu.namespacePtr(namespace_index).fileScope(zcu).zir.?;
const zir_index = struct_type.zir_index.resolve(ip) orelse return error.AnalysisFail;
@@ -35173,7 +35408,7 @@ fn structFields(
return;
},
.auto, .@"extern" => {
- struct_type.setLayoutResolved(ip, 0, .none);
+ struct_type.setLayoutResolved(ip, io, 0, .none);
return;
},
};
@@ -35245,7 +35480,7 @@ fn structFields(
extra_index += 1;
// This string needs to outlive the ZIR code.
- const field_name = try ip.getOrPutString(gpa, pt.tid, field_name_zir, .no_embedded_nulls);
+ const field_name = try ip.getOrPutString(gpa, io, pt.tid, field_name_zir, .no_embedded_nulls);
assert(struct_type.addFieldName(ip, field_name) == null);
if (has_align) {
@@ -35345,8 +35580,8 @@ fn structFields(
extra_index += zir_field.init_body_len;
}
- struct_type.clearFieldTypesWip(ip);
- if (!any_inits) struct_type.setHaveFieldInits(ip);
+ struct_type.clearFieldTypesWip(ip, io);
+ if (!any_inits) struct_type.setHaveFieldInits(ip, io);
try sema.flushExports();
}
@@ -35485,8 +35720,11 @@ fn unionFields(
const pt = sema.pt;
const zcu = pt.zcu;
- const gpa = zcu.gpa;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const ip = &zcu.intern_pool;
+
const zir = zcu.namespacePtr(union_type.namespace).fileScope(zcu).zir.?;
const zir_index = union_type.zir_index.resolve(ip) orelse return error.AnalysisFail;
const extended = zir.instructions.items(.data)[@intFromEnum(zir_index)].extended;
@@ -35595,7 +35833,7 @@ fn unionFields(
.enum_type => ip.loadEnumType(provided_ty.toIntern()),
else => return sema.fail(&block_scope, tag_ty_src, "expected enum tag type, found '{f}'", .{provided_ty.fmt(pt)}),
};
- union_type.setTagType(ip, provided_ty.toIntern());
+ union_type.setTagType(ip, io, provided_ty.toIntern());
// The fields of the union must match the enum exactly.
// A flag per field is used to check for missing and extraneous fields.
explicit_tags_seen = try sema.arena.alloc(bool, enum_type.names.len);
@@ -35727,7 +35965,7 @@ fn unionFields(
}
// This string needs to outlive the ZIR code.
- const field_name = try ip.getOrPutString(gpa, pt.tid, field_name_zir, .no_embedded_nulls);
+ const field_name = try ip.getOrPutString(gpa, io, pt.tid, field_name_zir, .no_embedded_nulls);
if (enum_field_names.len != 0) {
enum_field_names[field_i] = field_name;
}
@@ -35871,10 +36109,10 @@ fn unionFields(
}
} else if (enum_field_vals.count() > 0) {
const enum_ty = try sema.generateUnionTagTypeNumbered(&block_scope, enum_field_names, enum_field_vals.keys(), union_ty, union_type.name);
- union_type.setTagType(ip, enum_ty);
+ union_type.setTagType(ip, io, enum_ty);
} else {
const enum_ty = try sema.generateUnionTagTypeSimple(&block_scope, enum_field_names, union_ty, union_type.name);
- union_type.setTagType(ip, enum_ty);
+ union_type.setTagType(ip, io, enum_ty);
}
try sema.flushExports();
@@ -35890,18 +36128,21 @@ fn generateUnionTagTypeNumbered(
) !InternPool.Index {
const pt = sema.pt;
const zcu = pt.zcu;
- const gpa = sema.gpa;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const ip = &zcu.intern_pool;
const name = try ip.getOrPutStringFmt(
gpa,
+ io,
pt.tid,
"@typeInfo({f}).@\"union\".tag_type.?",
.{union_name.fmt(ip)},
.no_embedded_nulls,
);
- const enum_ty = try ip.getGeneratedTagEnumType(gpa, pt.tid, .{
+ const enum_ty = try ip.getGeneratedTagEnumType(gpa, io, pt.tid, .{
.name = name,
.owner_union_ty = union_type,
.tag_ty = if (enum_field_vals.len == 0)
@@ -35926,18 +36167,21 @@ fn generateUnionTagTypeSimple(
) !InternPool.Index {
const pt = sema.pt;
const zcu = pt.zcu;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const ip = &zcu.intern_pool;
- const gpa = sema.gpa;
const name = try ip.getOrPutStringFmt(
gpa,
+ io,
pt.tid,
"@typeInfo({f}).@\"union\".tag_type.?",
.{union_name.fmt(ip)},
.no_embedded_nulls,
);
- const enum_ty = try ip.getGeneratedTagEnumType(gpa, pt.tid, .{
+ const enum_ty = try ip.getGeneratedTagEnumType(gpa, io, pt.tid, .{
.name = name,
.owner_union_ty = union_type,
.tag_ty = (try pt.smallestUnsignedInt(enum_field_names.len -| 1)).toIntern(),
@@ -35958,7 +36202,11 @@ fn generateUnionTagTypeSimple(
pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
const pt = sema.pt;
const zcu = pt.zcu;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const ip = &zcu.intern_pool;
+
return switch (ty.toIntern()) {
.u0_type,
.i0_type,
@@ -36302,7 +36550,8 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
(try pt.intValue(.fromInterned(enum_type.tag_ty), 0)).toIntern()
else
try ip.getCoercedInts(
- zcu.gpa,
+ gpa,
+ io,
pt.tid,
ip.indexToKey(enum_type.values.get(ip)[0]).int,
enum_type.tag_ty,
@@ -36936,12 +37185,16 @@ fn checkRuntimeValue(sema: *Sema, ptr: Air.Inst.Ref) bool {
fn validateRuntimeValue(sema: *Sema, block: *Block, val_src: LazySrcLoc, val: Air.Inst.Ref) CompileError!void {
if (sema.checkRuntimeValue(val)) return;
return sema.failWithOwnedErrorMsg(block, msg: {
- const msg = try sema.errMsg(val_src, "runtime value contains reference to comptime var", .{});
- errdefer msg.destroy(sema.gpa);
- try sema.errNote(val_src, msg, "comptime var pointers are not available at runtime", .{});
const pt = sema.pt;
const zcu = pt.zcu;
- const val_str = try zcu.intern_pool.getOrPutString(zcu.gpa, pt.tid, "runtime_value", .no_embedded_nulls);
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
+
+ const msg = try sema.errMsg(val_src, "runtime value contains reference to comptime var", .{});
+ errdefer msg.destroy(gpa);
+ try sema.errNote(val_src, msg, "comptime var pointers are not available at runtime", .{});
+ const val_str = try pt.zcu.intern_pool.getOrPutString(gpa, io, pt.tid, "runtime_value", .no_embedded_nulls);
try sema.explainWhyValueContainsReferenceToComptimeVar(msg, val_src, val_str, .fromInterned(val.toInterned().?));
break :msg msg;
});
@@ -37385,7 +37638,9 @@ fn resolveDeclaredEnumInner(
) Zcu.CompileError!void {
const pt = sema.pt;
const zcu = pt.zcu;
- const gpa = zcu.gpa;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const ip = &zcu.intern_pool;
const bit_bags_count = std.math.divCeil(usize, fields_len, 32) catch unreachable;
@@ -37430,7 +37685,7 @@ fn resolveDeclaredEnumInner(
const field_name_zir = zir.nullTerminatedString(field_name_index);
extra_index += 1; // field name
- const field_name = try ip.getOrPutString(gpa, pt.tid, field_name_zir, .no_embedded_nulls);
+ const field_name = try ip.getOrPutString(gpa, io, pt.tid, field_name_zir, .no_embedded_nulls);
const value_src: LazySrcLoc = .{
.base_node_inst = tracked_inst,
@@ -37541,7 +37796,9 @@ pub fn resolveNavPtrModifiers(
) CompileError!NavPtrModifiers {
const pt = sema.pt;
const zcu = pt.zcu;
- const gpa = zcu.gpa;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const ip = &zcu.intern_pool;
const align_src = block.src(.{ .node_offset_var_decl_align = .zero });
@@ -37563,7 +37820,7 @@ pub fn resolveNavPtrModifiers(
} else if (bytes.len == 0) {
return sema.fail(block, section_src, "linksection cannot be empty", .{});
}
- break :ls try ip.getOrPutStringOpt(gpa, pt.tid, bytes, .no_embedded_nulls);
+ break :ls try ip.getOrPutStringOpt(gpa, io, pt.tid, bytes, .no_embedded_nulls);
};
const @"addrspace": std.builtin.AddressSpace = as: {
@@ -37595,8 +37852,10 @@ pub fn resolveNavPtrModifiers(
pub fn analyzeMemoizedState(sema: *Sema, block: *Block, simple_src: LazySrcLoc, builtin_namespace: InternPool.NamespaceIndex, stage: InternPool.MemoizedStateStage) CompileError!bool {
const pt = sema.pt;
const zcu = pt.zcu;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const ip = &zcu.intern_pool;
- const gpa = zcu.gpa;
var any_changed = false;
@@ -37613,7 +37872,7 @@ pub fn analyzeMemoizedState(sema: *Sema, block: *Block, simple_src: LazySrcLoc,
},
};
- const name_nts = try ip.getOrPutString(gpa, pt.tid, name, .no_embedded_nulls);
+ const name_nts = try ip.getOrPutString(gpa, io, pt.tid, name, .no_embedded_nulls);
const nav = try sema.namespaceLookup(block, simple_src, parent_ns, name_nts) orelse
return sema.fail(block, simple_src, "{s} missing {s}", .{ parent_name, name });
diff --git a/src/Sema/LowerZon.zig b/src/Sema/LowerZon.zig
index eb1fba121a..76cf3d7f2c 100644
--- a/src/Sema/LowerZon.zig
+++ b/src/Sema/LowerZon.zig
@@ -38,8 +38,11 @@ pub fn run(
block: *Sema.Block,
) CompileError!InternPool.Index {
const pt = sema.pt;
+ const comp = pt.zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
- const tracked_inst = try pt.zcu.intern_pool.trackZir(pt.zcu.gpa, pt.tid, .{
+ const tracked_inst = try pt.zcu.intern_pool.trackZir(gpa, io, pt.tid, .{
.file = file_index,
.inst = .main_struct_inst, // this is the only trackable instruction in a ZON file
});
@@ -63,8 +66,10 @@ pub fn run(
}
fn lowerExprAnonResTy(self: *LowerZon, node: Zoir.Node.Index) CompileError!InternPool.Index {
- const gpa = self.sema.gpa;
const pt = self.sema.pt;
+ const comp = pt.zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const ip = &pt.zcu.intern_pool;
switch (node.get(self.file.zoir.?)) {
.true => return .bool_true,
@@ -94,13 +99,14 @@ fn lowerExprAnonResTy(self: *LowerZon, node: Zoir.Node.Index) CompileError!Inter
.enum_literal => |val| return pt.intern(.{
.enum_literal = try ip.getOrPutString(
gpa,
+ io,
pt.tid,
val.get(self.file.zoir.?),
.no_embedded_nulls,
),
}),
.string_literal => |val| {
- const ip_str = try ip.getOrPutString(gpa, pt.tid, val, .maybe_embedded_nulls);
+ const ip_str = try ip.getOrPutString(gpa, io, pt.tid, val, .maybe_embedded_nulls);
const result = try self.sema.addStrLit(ip_str, val.len);
return result.toInterned().?;
},
@@ -112,14 +118,10 @@ fn lowerExprAnonResTy(self: *LowerZon, node: Zoir.Node.Index) CompileError!Inter
values[i] = try self.lowerExprAnonResTy(nodes.at(@intCast(i)));
types[i] = Value.fromInterned(values[i]).typeOf(pt.zcu).toIntern();
}
- const ty = try ip.getTupleType(
- gpa,
- pt.tid,
- .{
- .types = types,
- .values = values,
- },
- );
+ const ty = try ip.getTupleType(gpa, io, pt.tid, .{
+ .types = types,
+ .values = values,
+ });
return (try pt.aggregateValue(.fromInterned(ty), values)).toIntern();
},
.struct_literal => |init| {
@@ -129,6 +131,7 @@ fn lowerExprAnonResTy(self: *LowerZon, node: Zoir.Node.Index) CompileError!Inter
}
const struct_ty = switch (try ip.getStructType(
gpa,
+ io,
pt.tid,
.{
.layout = .auto,
@@ -168,6 +171,7 @@ fn lowerExprAnonResTy(self: *LowerZon, node: Zoir.Node.Index) CompileError!Inter
for (init.names, 0..) |name, field_idx| {
const name_interned = try ip.getOrPutString(
gpa,
+ io,
pt.tid,
name.get(self.file.zoir.?),
.no_embedded_nulls,
@@ -636,11 +640,16 @@ fn lowerArray(self: *LowerZon, node: Zoir.Node.Index, res_ty: Type) !InternPool.
}
fn lowerEnum(self: *LowerZon, node: Zoir.Node.Index, res_ty: Type) !InternPool.Index {
- const ip = &self.sema.pt.zcu.intern_pool;
+ const pt = self.sema.pt;
+ const comp = pt.zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
+ const ip = &pt.zcu.intern_pool;
switch (node.get(self.file.zoir.?)) {
.enum_literal => |field_name| {
const field_name_interned = try ip.getOrPutString(
- self.sema.gpa,
+ gpa,
+ io,
self.sema.pt.tid,
field_name.get(self.file.zoir.?),
.no_embedded_nulls,
@@ -665,11 +674,16 @@ fn lowerEnum(self: *LowerZon, node: Zoir.Node.Index, res_ty: Type) !InternPool.I
}
fn lowerEnumLiteral(self: *LowerZon, node: Zoir.Node.Index) !InternPool.Index {
- const ip = &self.sema.pt.zcu.intern_pool;
+ const pt = self.sema.pt;
+ const comp = pt.zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
+ const ip = &pt.zcu.intern_pool;
switch (node.get(self.file.zoir.?)) {
.enum_literal => |field_name| {
const field_name_interned = try ip.getOrPutString(
- self.sema.gpa,
+ gpa,
+ io,
self.sema.pt.tid,
field_name.get(self.file.zoir.?),
.no_embedded_nulls,
@@ -747,8 +761,11 @@ fn lowerTuple(self: *LowerZon, node: Zoir.Node.Index, res_ty: Type) !InternPool.
}
fn lowerStruct(self: *LowerZon, node: Zoir.Node.Index, res_ty: Type) !InternPool.Index {
- const ip = &self.sema.pt.zcu.intern_pool;
- const gpa = self.sema.gpa;
+ const pt = self.sema.pt;
+ const comp = pt.zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
+ const ip = &pt.zcu.intern_pool;
try res_ty.resolveFields(self.sema.pt);
try res_ty.resolveStructFieldInits(self.sema.pt);
@@ -772,6 +789,7 @@ fn lowerStruct(self: *LowerZon, node: Zoir.Node.Index, res_ty: Type) !InternPool
for (0..fields.names.len) |i| {
const field_name = try ip.getOrPutString(
gpa,
+ io,
self.sema.pt.tid,
fields.names[i].get(self.file.zoir.?),
.no_embedded_nulls,
@@ -807,8 +825,11 @@ fn lowerStruct(self: *LowerZon, node: Zoir.Node.Index, res_ty: Type) !InternPool
}
fn lowerSlice(self: *LowerZon, node: Zoir.Node.Index, res_ty: Type) !InternPool.Index {
- const ip = &self.sema.pt.zcu.intern_pool;
- const gpa = self.sema.gpa;
+ const pt = self.sema.pt;
+ const comp = pt.zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
+ const ip = &pt.zcu.intern_pool;
const ptr_info = res_ty.ptrInfo(self.sema.pt.zcu);
@@ -820,7 +841,7 @@ fn lowerSlice(self: *LowerZon, node: Zoir.Node.Index, res_ty: Type) !InternPool.
if (string_alignment and ptr_info.child == .u8_type and string_sentinel) {
switch (node.get(self.file.zoir.?)) {
.string_literal => |val| {
- const ip_str = try ip.getOrPutString(gpa, self.sema.pt.tid, val, .maybe_embedded_nulls);
+ const ip_str = try ip.getOrPutString(gpa, io, self.sema.pt.tid, val, .maybe_embedded_nulls);
const str_ref = try self.sema.addStrLit(ip_str, val.len);
return (try self.sema.coerce(
self.block,
@@ -892,7 +913,11 @@ fn lowerSlice(self: *LowerZon, node: Zoir.Node.Index, res_ty: Type) !InternPool.
}
fn lowerUnion(self: *LowerZon, node: Zoir.Node.Index, res_ty: Type) !InternPool.Index {
- const ip = &self.sema.pt.zcu.intern_pool;
+ const pt = self.sema.pt;
+ const comp = pt.zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
+ const ip = &pt.zcu.intern_pool;
try res_ty.resolveFields(self.sema.pt);
const union_info = self.sema.pt.zcu.typeToUnion(res_ty).?;
const enum_tag_info = union_info.loadTagType(ip);
@@ -900,7 +925,8 @@ fn lowerUnion(self: *LowerZon, node: Zoir.Node.Index, res_ty: Type) !InternPool.
const field_name, const maybe_field_node = switch (node.get(self.file.zoir.?)) {
.enum_literal => |name| b: {
const field_name = try ip.getOrPutString(
- self.sema.gpa,
+ gpa,
+ io,
self.sema.pt.tid,
name.get(self.file.zoir.?),
.no_embedded_nulls,
@@ -916,7 +942,8 @@ fn lowerUnion(self: *LowerZon, node: Zoir.Node.Index, res_ty: Type) !InternPool.
return error.WrongType;
}
const field_name = try ip.getOrPutString(
- self.sema.gpa,
+ gpa,
+ io,
self.sema.pt.tid,
fields.names[0].get(self.file.zoir.?),
.no_embedded_nulls,
@@ -942,7 +969,7 @@ fn lowerUnion(self: *LowerZon, node: Zoir.Node.Index, res_ty: Type) !InternPool.
}
break :b .void_value;
};
- return ip.getUnion(self.sema.pt.zcu.gpa, self.sema.pt.tid, .{
+ return ip.getUnion(gpa, io, self.sema.pt.tid, .{
.ty = res_ty.toIntern(),
.tag = tag.toIntern(),
.val = val,
diff --git a/src/Type.zig b/src/Type.zig
index a36bca94b9..e6a8965409 100644
--- a/src/Type.zig
+++ b/src/Type.zig
@@ -486,6 +486,7 @@ pub fn hasRuntimeBitsInner(
tid: strat.Tid(),
) RuntimeBitsError!bool {
const ip = &zcu.intern_pool;
+ const io = zcu.comp.io;
return switch (ty.toIntern()) {
.empty_tuple_type => false,
else => switch (ip.indexToKey(ty.toIntern())) {
@@ -571,7 +572,7 @@ pub fn hasRuntimeBitsInner(
},
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
- if (strat != .eager and struct_type.assumeRuntimeBitsIfFieldTypesWip(ip)) {
+ if (strat != .eager and struct_type.assumeRuntimeBitsIfFieldTypesWip(ip, io)) {
// In this case, we guess that hasRuntimeBits() for this type is true,
// and then later if our guess was incorrect, we emit a compile error.
return true;
@@ -610,7 +611,7 @@ pub fn hasRuntimeBitsInner(
.none => if (strat != .eager) {
// In this case, we guess that hasRuntimeBits() for this type is true,
// and then later if our guess was incorrect, we emit a compile error.
- if (union_type.assumeRuntimeBitsIfFieldTypesWip(ip)) return true;
+ if (union_type.assumeRuntimeBitsIfFieldTypesWip(ip, io)) return true;
},
.safety, .tagged => {},
}
@@ -2491,8 +2492,11 @@ pub fn isNumeric(ty: Type, zcu: *const Zcu) bool {
/// resolves field types rather than asserting they are already resolved.
pub fn onePossibleValue(starting_type: Type, pt: Zcu.PerThread) !?Value {
const zcu = pt.zcu;
- var ty = starting_type;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const ip = &zcu.intern_pool;
+ var ty = starting_type;
while (true) switch (ty.toIntern()) {
.empty_tuple_type => return Value.empty_tuple,
@@ -2664,7 +2668,8 @@ pub fn onePossibleValue(starting_type: Type, pt: Zcu.PerThread) !?Value {
(try pt.intValue(.fromInterned(enum_type.tag_ty), 0)).toIntern()
else
try ip.getCoercedInts(
- zcu.gpa,
+ gpa,
+ io,
pt.tid,
ip.indexToKey(enum_type.values.get(ip)[0]).int,
enum_type.tag_ty,
@@ -2720,6 +2725,7 @@ pub fn comptimeOnlyInner(
tid: strat.Tid(),
) SemaError!bool {
const ip = &zcu.intern_pool;
+ const io = zcu.comp.io;
return switch (ty.toIntern()) {
.empty_tuple_type => false,
@@ -2798,16 +2804,16 @@ pub fn comptimeOnlyInner(
.yes => true,
.unknown => unreachable,
},
- .sema => switch (struct_type.setRequiresComptimeWip(ip)) {
+ .sema => switch (struct_type.setRequiresComptimeWip(ip, io)) {
.no, .wip => false,
.yes => true,
.unknown => {
if (struct_type.flagsUnordered(ip).field_types_wip) {
- struct_type.setRequiresComptime(ip, .unknown);
+ struct_type.setRequiresComptime(ip, io, .unknown);
return false;
}
- errdefer struct_type.setRequiresComptime(ip, .unknown);
+ errdefer struct_type.setRequiresComptime(ip, io, .unknown);
const pt = strat.pt(zcu, tid);
try ty.resolveFields(pt);
@@ -2821,12 +2827,12 @@ pub fn comptimeOnlyInner(
// be considered resolved. Comptime-only types
// still maintain a layout of their
// runtime-known fields.
- struct_type.setRequiresComptime(ip, .yes);
+ struct_type.setRequiresComptime(ip, io, .yes);
return true;
}
}
- struct_type.setRequiresComptime(ip, .no);
+ struct_type.setRequiresComptime(ip, io, .no);
return false;
},
},
@@ -2850,16 +2856,16 @@ pub fn comptimeOnlyInner(
.yes => true,
.unknown => unreachable,
},
- .sema => switch (union_type.setRequiresComptimeWip(ip)) {
+ .sema => switch (union_type.setRequiresComptimeWip(ip, io)) {
.no, .wip => return false,
.yes => return true,
.unknown => {
if (union_type.flagsUnordered(ip).status == .field_types_wip) {
- union_type.setRequiresComptime(ip, .unknown);
+ union_type.setRequiresComptime(ip, io, .unknown);
return false;
}
- errdefer union_type.setRequiresComptime(ip, .unknown);
+ errdefer union_type.setRequiresComptime(ip, io, .unknown);
const pt = strat.pt(zcu, tid);
try ty.resolveFields(pt);
@@ -2867,12 +2873,12 @@ pub fn comptimeOnlyInner(
for (0..union_type.field_types.len) |field_idx| {
const field_ty = union_type.field_types.get(ip)[field_idx];
if (try Type.fromInterned(field_ty).comptimeOnlyInner(strat, zcu, tid)) {
- union_type.setRequiresComptime(ip, .yes);
+ union_type.setRequiresComptime(ip, io, .yes);
return true;
}
}
- union_type.setRequiresComptime(ip, .no);
+ union_type.setRequiresComptime(ip, io, .no);
return false;
},
},
diff --git a/src/Value.zig b/src/Value.zig
index e2699746fa..103140d3c9 100644
--- a/src/Value.zig
+++ b/src/Value.zig
@@ -60,18 +60,21 @@ pub fn fmtValueSemaFull(ctx: print_value.FormatContext) std.fmt.Alt(print_value.
/// Asserts `val` is an array of `u8`
pub fn toIpString(val: Value, ty: Type, pt: Zcu.PerThread) !InternPool.NullTerminatedString {
const zcu = pt.zcu;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
+ const ip = &zcu.intern_pool;
assert(ty.zigTypeTag(zcu) == .array);
assert(ty.childType(zcu).toIntern() == .u8_type);
- const ip = &zcu.intern_pool;
switch (zcu.intern_pool.indexToKey(val.toIntern()).aggregate.storage) {
.bytes => |bytes| return bytes.toNullTerminatedString(ty.arrayLen(zcu), ip),
.elems => return arrayToIpString(val, ty.arrayLen(zcu), pt),
.repeated_elem => |elem| {
const byte: u8 = @intCast(Value.fromInterned(elem).toUnsignedInt(zcu));
const len: u32 = @intCast(ty.arrayLen(zcu));
- const string_bytes = ip.getLocal(pt.tid).getMutableStringBytes(zcu.gpa);
+ const string_bytes = ip.getLocal(pt.tid).getMutableStringBytes(gpa, io);
try string_bytes.appendNTimes(.{byte}, len);
- return ip.getOrPutTrailingString(zcu.gpa, pt.tid, len, .no_embedded_nulls);
+ return ip.getOrPutTrailingString(gpa, io, pt.tid, len, .no_embedded_nulls);
},
}
}
@@ -109,10 +112,12 @@ fn arrayToAllocatedBytes(val: Value, len: u64, allocator: Allocator, pt: Zcu.Per
fn arrayToIpString(val: Value, len_u64: u64, pt: Zcu.PerThread) !InternPool.NullTerminatedString {
const zcu = pt.zcu;
- const gpa = zcu.gpa;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const ip = &zcu.intern_pool;
const len: u32 = @intCast(len_u64);
- const string_bytes = ip.getLocal(pt.tid).getMutableStringBytes(gpa);
+ const string_bytes = ip.getLocal(pt.tid).getMutableStringBytes(gpa, io);
try string_bytes.ensureUnusedCapacity(len);
for (0..len) |i| {
// I don't think elemValue has the possibility to affect ip.string_bytes. Let's
@@ -123,7 +128,7 @@ fn arrayToIpString(val: Value, len_u64: u64, pt: Zcu.PerThread) !InternPool.Null
const byte: u8 = @intCast(elem_val.toUnsignedInt(zcu));
string_bytes.appendAssumeCapacity(.{byte});
}
- return ip.getOrPutTrailingString(gpa, pt.tid, len, .no_embedded_nulls);
+ return ip.getOrPutTrailingString(gpa, io, pt.tid, len, .no_embedded_nulls);
}
pub fn fromInterned(i: InternPool.Index) Value {
@@ -1141,6 +1146,7 @@ pub fn sliceArray(
) error{OutOfMemory}!Value {
const pt = sema.pt;
const ip = &pt.zcu.intern_pool;
+ const io = pt.zcu.comp.io;
return Value.fromInterned(try pt.intern(.{
.aggregate = .{
.ty = switch (pt.zcu.intern_pool.indexToKey(pt.zcu.intern_pool.typeOf(val.toIntern()))) {
@@ -1160,6 +1166,7 @@ pub fn sliceArray(
try ip.string_bytes.ensureUnusedCapacity(sema.gpa, end - start + 1);
break :storage .{ .bytes = try ip.getOrPutString(
sema.gpa,
+ io,
bytes.toSlice(end, ip)[start..],
.maybe_embedded_nulls,
) };
@@ -2874,6 +2881,7 @@ const interpret_mode: InterpretMode = @field(InterpretMode, @tagName(build_optio
/// `val` must be fully resolved.
pub fn interpret(val: Value, comptime T: type, pt: Zcu.PerThread) error{ OutOfMemory, UndefinedValue, TypeMismatch }!T {
const zcu = pt.zcu;
+ const io = zcu.comp.io;
const ip = &zcu.intern_pool;
const ty = val.typeOf(zcu);
if (ty.zigTypeTag(zcu) != @typeInfo(T)) return error.TypeMismatch;
@@ -2960,7 +2968,7 @@ pub fn interpret(val: Value, comptime T: type, pt: Zcu.PerThread) error{ OutOfMe
const struct_obj = zcu.typeToStruct(ty) orelse return error.TypeMismatch;
var result: T = undefined;
inline for (@"struct".fields) |field| {
- const field_name_ip = try ip.getOrPutString(zcu.gpa, pt.tid, field.name, .no_embedded_nulls);
+ const field_name_ip = try ip.getOrPutString(zcu.gpa, io, pt.tid, field.name, .no_embedded_nulls);
@field(result, field.name) = if (struct_obj.nameIndex(ip, field_name_ip)) |field_idx| f: {
const field_val = try val.fieldValue(pt, field_idx);
break :f try field_val.interpret(field.type, pt);
@@ -2979,6 +2987,7 @@ pub fn uninterpret(val: anytype, ty: Type, pt: Zcu.PerThread) error{ OutOfMemory
const T = @TypeOf(val);
const zcu = pt.zcu;
+ const io = zcu.comp.io;
const ip = &zcu.intern_pool;
if (ty.zigTypeTag(zcu) != @typeInfo(T)) return error.TypeMismatch;
@@ -3022,7 +3031,7 @@ pub fn uninterpret(val: anytype, ty: Type, pt: Zcu.PerThread) error{ OutOfMemory
.@"enum" => switch (interpret_mode) {
.direct => try pt.enumValue(ty, (try uninterpret(@intFromEnum(val), ty.intTagType(zcu), pt)).toIntern()),
.by_name => {
- const field_name_ip = try ip.getOrPutString(zcu.gpa, pt.tid, @tagName(val), .no_embedded_nulls);
+ const field_name_ip = try ip.getOrPutString(zcu.gpa, io, pt.tid, @tagName(val), .no_embedded_nulls);
const field_idx = ty.enumFieldIndex(field_name_ip, zcu) orelse return error.TypeMismatch;
return pt.enumValueFieldIndex(ty, field_idx);
},
@@ -3059,7 +3068,7 @@ pub fn uninterpret(val: anytype, ty: Type, pt: Zcu.PerThread) error{ OutOfMemory
defer zcu.gpa.free(field_vals);
@memset(field_vals, .none);
inline for (@"struct".fields) |field| {
- const field_name_ip = try ip.getOrPutString(zcu.gpa, pt.tid, field.name, .no_embedded_nulls);
+ const field_name_ip = try ip.getOrPutString(zcu.gpa, io, pt.tid, field.name, .no_embedded_nulls);
if (struct_obj.nameIndex(ip, field_name_ip)) |field_idx| {
const field_ty = ty.fieldType(field_idx, zcu);
field_vals[field_idx] = (try uninterpret(@field(val, field.name), field_ty, pt)).toIntern();
diff --git a/src/Zcu.zig b/src/Zcu.zig
index 59f6c8ee91..137b4d8b59 100644
--- a/src/Zcu.zig
+++ b/src/Zcu.zig
@@ -37,6 +37,7 @@ const InternPool = @import("InternPool.zig");
const Alignment = InternPool.Alignment;
const AnalUnit = InternPool.AnalUnit;
const BuiltinFn = std.zig.BuiltinFn;
+const codegen = @import("codegen.zig");
const LlvmObject = @import("codegen/llvm.zig").Object;
const dev = @import("dev.zig");
const Zoir = std.zig.Zoir;
@@ -317,6 +318,8 @@ incremental_debug_state: if (build_options.enable_debug_extensions) IncrementalD
/// this timer must be temporarily paused and resumed later.
cur_analysis_timer: ?Compilation.Timer = null,
+codegen_task_pool: CodegenTaskPool,
+
generation: u32 = 0,
pub const IncrementalDebugState = struct {
@@ -895,12 +898,13 @@ pub const Namespace = struct {
ns: Namespace,
ip: *InternPool,
gpa: Allocator,
+ io: Io,
tid: Zcu.PerThread.Id,
name: InternPool.NullTerminatedString,
) !InternPool.NullTerminatedString {
const ns_name = Type.fromInterned(ns.owner_type).containerTypeName(ip);
if (name == .empty) return ns_name;
- return ip.getOrPutStringFmt(gpa, tid, "{f}.{f}", .{ ns_name.fmt(ip), name.fmt(ip) }, .no_embedded_nulls);
+ return ip.getOrPutStringFmt(gpa, io, tid, "{f}.{f}", .{ ns_name.fmt(ip), name.fmt(ip) }, .no_embedded_nulls);
}
};
@@ -1139,13 +1143,15 @@ pub const File = struct {
}
pub fn internFullyQualifiedName(file: File, pt: Zcu.PerThread) !InternPool.NullTerminatedString {
- const gpa = pt.zcu.gpa;
const ip = &pt.zcu.intern_pool;
- const string_bytes = ip.getLocal(pt.tid).getMutableStringBytes(gpa);
+ const comp = pt.zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
+ const string_bytes = ip.getLocal(pt.tid).getMutableStringBytes(gpa, io);
var w: Writer = .fixed((try string_bytes.addManyAsSlice(file.fullyQualifiedNameLen()))[0]);
file.renderFullyQualifiedName(&w) catch unreachable;
assert(w.end == w.buffer.len);
- return ip.getOrPutTrailingString(gpa, pt.tid, @intCast(w.end), .no_embedded_nulls);
+ return ip.getOrPutTrailingString(gpa, io, pt.tid, @intCast(w.end), .no_embedded_nulls);
}
pub const Index = InternPool.FileIndex;
@@ -2801,13 +2807,14 @@ pub const CompileError = error{
ComptimeBreak,
};
-pub fn init(zcu: *Zcu, thread_count: usize) !void {
- const gpa = zcu.gpa;
- try zcu.intern_pool.init(gpa, thread_count);
+pub fn init(zcu: *Zcu, gpa: Allocator, io: Io, thread_count: usize) !void {
+ try zcu.intern_pool.init(gpa, io, thread_count);
}
pub fn deinit(zcu: *Zcu) void {
- const gpa = zcu.gpa;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
{
const pt: Zcu.PerThread = .activate(zcu, .main);
defer pt.deactivate();
@@ -2897,7 +2904,7 @@ pub fn deinit(zcu: *Zcu) void {
zcu.incremental_debug_state.deinit(gpa);
}
}
- zcu.intern_pool.deinit(gpa);
+ zcu.intern_pool.deinit(gpa, io);
}
pub fn namespacePtr(zcu: *Zcu, index: Namespace.Index) *Namespace {
@@ -4442,7 +4449,7 @@ pub fn maybeUnresolveIes(zcu: *Zcu, func_index: InternPool.Index) !void {
try zcu.outdated_ready.put(gpa, unit, {});
}
}
- zcu.intern_pool.funcSetIesResolved(func_index, .none);
+ zcu.intern_pool.funcSetIesResolved(zcu.comp.io, func_index, .none);
}
}
@@ -4620,10 +4627,12 @@ pub fn codegenFail(
/// Takes ownership of `msg`, even on OOM.
pub fn codegenFailMsg(zcu: *Zcu, nav_index: InternPool.Nav.Index, msg: *ErrorMsg) CodegenFailError {
- const gpa = zcu.gpa;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
{
- zcu.comp.mutex.lock();
- defer zcu.comp.mutex.unlock();
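+ // Uncancelable acquire: this failure path presumably has no way to surface
+ // `error.Canceled`, so taking the lock must not be interruptible by cancellation.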
+ comp.mutex.lockUncancelable(io);
+ defer comp.mutex.unlock(io);
errdefer msg.deinit(gpa);
try zcu.failed_codegen.putNoClobber(gpa, nav_index, msg);
}
@@ -4632,8 +4641,10 @@ pub fn codegenFailMsg(zcu: *Zcu, nav_index: InternPool.Nav.Index, msg: *ErrorMsg
/// Asserts that `zcu.failed_codegen` contains the key `nav`, with the necessary lock held.
pub fn assertCodegenFailed(zcu: *Zcu, nav: InternPool.Nav.Index) void {
- zcu.comp.mutex.lock();
- defer zcu.comp.mutex.unlock();
+ const comp = zcu.comp;
+ const io = comp.io;
+ comp.mutex.lockUncancelable(io);
+ defer comp.mutex.unlock(io);
assert(zcu.failed_codegen.contains(nav));
}
@@ -4794,8 +4805,9 @@ const TrackedUnitSema = struct {
report_time: {
const sema_ns = zcu.cur_analysis_timer.?.finish() orelse break :report_time;
const zir_decl = tus.analysis_timer_decl orelse break :report_time;
- comp.mutex.lock();
- defer comp.mutex.unlock();
+ const io = comp.io;
+ comp.mutex.lockUncancelable(io);
+ defer comp.mutex.unlock(io);
comp.time_report.?.stats.cpu_ns_sema += sema_ns;
const gop = comp.time_report.?.decl_sema_info.getOrPut(comp.gpa, zir_decl) catch |err| switch (err) {
error.OutOfMemory => {
@@ -4830,3 +4842,170 @@ pub fn trackUnitSema(zcu: *Zcu, name: []const u8, zir_inst: ?InternPool.TrackedI
.analysis_timer_decl = zir_inst,
};
}
+
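+/// Bounded pool of in-flight codegen tasks. Semantic analysis calls `start` to hand
+/// a function's AIR to a codegen worker; the consumer (the linker side) calls
+/// `Index.wait` to collect the resulting MIR. Back-pressure comes from two limits:
+/// the slot count (`max_funcs_in_flight`) and the AIR-byte budget
+/// (`max_air_bytes_in_flight`).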
+pub const CodegenTaskPool = struct {
+ const CodegenResult = PerThread.RunCodegenError!codegen.AnyMir;
+
+ /// In the worst observed case, MIR is around 50 times as large as AIR. More typically, the ratio is
+ /// around 20. Going by that 50x multiplier, and assuming we want to consume no more than 500 MiB of
+ /// memory on AIR/MIR, we see a limit of around 10 MiB of AIR in-flight.
+ const max_air_bytes_in_flight = 10 * 1024 * 1024;
+
+ const max_funcs_in_flight = @import("link.zig").Queue.buffer_size;
+
+ available_air_bytes: u32,
+
+ /// Locks the freelist and `available_air_bytes`.
+ mutex: Io.Mutex,
+
+ /// Signaled when an item is added to the freelist.
+ free_cond: Io.Condition,
+ /// Pre-allocated with enough capacity for all indices.
+ free: std.ArrayList(Index),
+
+ /// `.none` means this task is in the freelist. The `task_air_bytes` and
+ /// `task_futures` entries are `undefined`.
+ task_funcs: []InternPool.Index,
+ task_air_bytes: []u32,
+ task_futures: []Io.Future(CodegenResult),
+
+ pub fn init(arena: Allocator) Allocator.Error!CodegenTaskPool {
+ const task_funcs = try arena.alloc(InternPool.Index, max_funcs_in_flight);
+ const task_air_bytes = try arena.alloc(u32, max_funcs_in_flight);
+ const task_futures = try arena.alloc(Io.Future(CodegenResult), max_funcs_in_flight);
+ @memset(task_funcs, .none);
+
+ var free: std.ArrayList(Index) = try .initCapacity(arena, max_funcs_in_flight);
+ for (0..max_funcs_in_flight) |index| free.appendAssumeCapacity(@enumFromInt(index));
+
+ return .{
+ .available_air_bytes = max_air_bytes_in_flight,
+ .mutex = .init,
+ .free_cond = .init,
+ .free = free,
+ .task_funcs = task_funcs,
+ .task_air_bytes = task_air_bytes,
+ .task_futures = task_futures,
+ };
+ }
+
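+ /// Reclaims the AIR-byte budget of any still-pending tasks and cancels them.
+ /// Assumes quiescence: no concurrent `start` or `wait` calls, hence no locking.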
+ pub fn cancel(pool: *CodegenTaskPool, zcu: *const Zcu) void {
+ const io = zcu.comp.io;
+ for (
+ pool.task_funcs,
+ pool.task_air_bytes,
+ pool.task_futures,
+ ) |func, effective_air_bytes, *future| {
+ if (func == .none) continue;
+ pool.available_air_bytes += effective_air_bytes;
+ var mir = future.cancel(io) catch continue;
+ mir.deinit(zcu);
+ }
+ assert(pool.available_air_bytes == max_air_bytes_in_flight);
+ }
+
+ pub fn start(
+ pool: *CodegenTaskPool,
+ zcu: *Zcu,
+ func_index: InternPool.Index,
+ air: *Air,
+ /// If `true`, this function will take ownership of `air`, freeing it after codegen
+ /// completes; it is not assumed that `air` will outlive this function. If `false`,
+ /// codegen will operate on `air` via the given pointer, which it is assumed will
+ /// outlive the codegen task.
+ move_air: bool,
+ ) Io.Cancelable!Index {
+ const io = zcu.comp.io;
+
+ // To avoid consuming an excessive amount of memory, there is a limit on the total number of AIR
+ // bytes which can be in the codegen/link pipeline at one time. If we exceed this limit, we must
+ // wait for codegen/link to finish some WIP functions so they catch up with us.
+ const actual_air_bytes: u32 = @intCast(air.instructions.len * 5 + air.extra.items.len * 4);
+ // We need to let all AIR through eventually, even if one function exceeds `max_air_bytes_in_flight`.
+ const effective_air_bytes: u32 = @min(actual_air_bytes, max_air_bytes_in_flight);
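+ // e.g. a single 14 MiB AIR body is charged as only 10 MiB, so it can still be
+ // admitted once the pipeline drains instead of blocking forever.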
+ assert(effective_air_bytes > 0);
+
+ const index: Index = index: {
+ try pool.mutex.lock(io);
+ defer pool.mutex.unlock(io);
+
+ while (pool.free.items.len == 0 or pool.available_air_bytes < effective_air_bytes) {
+ // The linker thread needs to catch up!
+ try pool.free_cond.wait(io, &pool.mutex);
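+ // (`wait` releases `pool.mutex` while blocked and reacquires it before
+ // returning, so both conditions are re-checked under the lock.)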
+ }
+
+ pool.available_air_bytes -= effective_air_bytes;
+ break :index pool.free.pop().?;
+ };
+
+ // No turning back now: we're incrementing `pending_codegen_jobs` and starting the worker.
+ errdefer comptime unreachable;
+
+ assert(zcu.pending_codegen_jobs.fetchAdd(1, .monotonic) > 0); // the "Code Generation" node is still active
+ assert(pool.task_funcs[@intFromEnum(index)] == .none);
+ pool.task_funcs[@intFromEnum(index)] = func_index;
+ pool.task_air_bytes[@intFromEnum(index)] = effective_air_bytes;
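+ // When we own the AIR, `air.*` is copied by value into the async frame and the
+ // worker frees it; otherwise only the pointer crosses into the worker.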
+ pool.task_futures[@intFromEnum(index)] = if (move_air) io.async(
+ workerCodegenOwnedAir,
+ .{ zcu, func_index, air.* },
+ ) else io.async(
+ workerCodegenExternalAir,
+ .{ zcu, func_index, air },
+ );
+
+ return index;
+ }
+ pub const Index = enum(u32) {
+ _,
+
+ /// Blocks until codegen has completed, successfully or otherwise.
+ /// The returned MIR is owned by the caller.
+ pub fn wait(
+ index: Index,
+ pool: *CodegenTaskPool,
+ io: Io,
+ ) PerThread.RunCodegenError!struct { InternPool.Index, codegen.AnyMir } {
+ const func = pool.task_funcs[@intFromEnum(index)];
+ assert(func != .none);
+ const effective_air_bytes = pool.task_air_bytes[@intFromEnum(index)];
+ const result = pool.task_futures[@intFromEnum(index)].await(io);
+
+ pool.task_funcs[@intFromEnum(index)] = .none;
+ pool.task_air_bytes[@intFromEnum(index)] = undefined;
+ pool.task_futures[@intFromEnum(index)] = undefined;
+
+ {
+ pool.mutex.lockUncancelable(io);
+ defer pool.mutex.unlock(io);
+ pool.available_air_bytes += effective_air_bytes;
+ pool.free.appendAssumeCapacity(index);
+ pool.free_cond.signal(io);
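+ // Wakes one producer blocked in `start`; a slot and AIR-byte capacity
+ // were just returned.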
+ }
+
+ return .{ func, try result };
+ }
+ };
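+
+ // A minimal usage sketch (hypothetical call sites; the real producer and
+ // consumer live elsewhere in the compiler, outside this diff):
+ //
+ //   const task = try zcu.codegen_task_pool.start(zcu, func_index, &air, true);
+ //   ...
+ //   const func, var mir = try task.wait(&zcu.codegen_task_pool, io);
+ //   defer mir.deinit(zcu);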
+ fn workerCodegenOwnedAir(
+ zcu: *Zcu,
+ func_index: InternPool.Index,
+ orig_air: Air,
+ ) CodegenResult {
+ // We own `air` now, so we are responsible for freeing it.
+ var air = orig_air;
+ defer air.deinit(zcu.comp.gpa);
+ const tid = Compilation.getTid();
+ const pt: Zcu.PerThread = .activate(zcu, @enumFromInt(tid));
+ defer pt.deactivate();
+ return pt.runCodegen(func_index, &air);
+ }
+ fn workerCodegenExternalAir(
+ zcu: *Zcu,
+ func_index: InternPool.Index,
+ air: *Air,
+ ) CodegenResult {
+ const tid = Compilation.getTid();
+ const pt: Zcu.PerThread = .activate(zcu, @enumFromInt(tid));
+ defer pt.deactivate();
+ return pt.runCodegen(func_index, air);
+ }
+};
diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig
index 702e30dab0..2ad5bac01c 100644
--- a/src/Zcu/PerThread.zig
+++ b/src/Zcu/PerThread.zig
@@ -269,8 +269,8 @@ pub fn updateFile(
// Any potential AST errors are converted to ZIR errors when we run AstGen/ZonGen.
file.tree = try Ast.parse(gpa, source, file.getMode());
if (timer.finish()) |ns_parse| {
- comp.mutex.lock();
- defer comp.mutex.unlock();
+ comp.mutex.lockUncancelable(io);
+ defer comp.mutex.unlock(io);
comp.time_report.?.stats.cpu_ns_parse += ns_parse;
}
@@ -295,8 +295,8 @@ pub fn updateFile(
},
}
if (timer.finish()) |ns_astgen| {
- comp.mutex.lock();
- defer comp.mutex.unlock();
+ comp.mutex.lockUncancelable(io);
+ defer comp.mutex.unlock(io);
comp.time_report.?.stats.cpu_ns_astgen += ns_astgen;
}
@@ -315,8 +315,8 @@ pub fn updateFile(
switch (file.getMode()) {
.zig => {
if (file.zir.?.hasCompileErrors()) {
- comp.mutex.lock();
- defer comp.mutex.unlock();
+ comp.mutex.lockUncancelable(io);
+ defer comp.mutex.unlock(io);
try zcu.failed_files.putNoClobber(gpa, file_index, null);
}
if (file.zir.?.loweringFailed()) {
@@ -328,8 +328,8 @@ pub fn updateFile(
.zon => {
if (file.zoir.?.hasCompileErrors()) {
file.status = .astgen_failure;
- comp.mutex.lock();
- defer comp.mutex.unlock();
+ comp.mutex.lockUncancelable(io);
+ defer comp.mutex.unlock(io);
try zcu.failed_files.putNoClobber(gpa, file_index, null);
} else {
file.status = .success;
@@ -415,7 +415,8 @@ pub fn updateZirRefs(pt: Zcu.PerThread) Allocator.Error!void {
const zcu = pt.zcu;
const comp = zcu.comp;
const ip = &zcu.intern_pool;
- const gpa = zcu.gpa;
+ const gpa = comp.gpa;
+ const io = comp.io;
// We need to visit every updated File for every TrackedInst in InternPool.
// This only includes Zig files; ZON files are omitted.
@@ -459,7 +460,7 @@ pub fn updateZirRefs(pt: Zcu.PerThread) Allocator.Error!void {
return;
for (ip.locals, 0..) |*local, tid| {
- const tracked_insts_list = local.getMutableTrackedInsts(gpa);
+ const tracked_insts_list = local.getMutableTrackedInsts(gpa, io);
for (tracked_insts_list.viewAllowEmpty().items(.@"0"), 0..) |*tracked_inst, tracked_inst_unwrapped_index| {
const file_index = tracked_inst.file;
const updated_file = updated_files.get(file_index) orelse continue;
@@ -530,6 +531,7 @@ pub fn updateZirRefs(pt: Zcu.PerThread) Allocator.Error!void {
if (old_decl.name == .empty) continue;
const name_ip = try zcu.intern_pool.getOrPutString(
zcu.gpa,
+ io,
pt.tid,
old_zir.nullTerminatedString(old_decl.name),
.no_embedded_nulls,
@@ -545,6 +547,7 @@ pub fn updateZirRefs(pt: Zcu.PerThread) Allocator.Error!void {
if (new_decl.name == .empty) continue;
const name_ip = try zcu.intern_pool.getOrPutString(
zcu.gpa,
+ io,
pt.tid,
new_zir.nullTerminatedString(new_decl.name),
.no_embedded_nulls,
@@ -575,7 +578,7 @@ pub fn updateZirRefs(pt: Zcu.PerThread) Allocator.Error!void {
}
}
- try ip.rehashTrackedInsts(gpa, pt.tid);
+ try ip.rehashTrackedInsts(gpa, io, pt.tid);
for (updated_files.keys(), updated_files.values()) |file_index, updated_file| {
const file = updated_file.file;
@@ -700,7 +703,9 @@ pub fn ensureMemoizedStateUpToDate(pt: Zcu.PerThread, stage: InternPool.Memoized
fn analyzeMemoizedState(pt: Zcu.PerThread, stage: InternPool.MemoizedStateStage) Zcu.CompileError!bool {
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
- const gpa = zcu.gpa;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const unit: AnalUnit = .wrap(.{ .memoized_state = stage });
@@ -716,7 +721,7 @@ fn analyzeMemoizedState(pt: Zcu.PerThread, stage: InternPool.MemoizedStateStage)
const std_type: Type = .fromInterned(zcu.fileRootType(std_file_index));
const std_namespace = std_type.getNamespaceIndex(zcu);
try pt.ensureNamespaceUpToDate(std_namespace);
- const builtin_str = try ip.getOrPutString(gpa, pt.tid, "builtin", .no_embedded_nulls);
+ const builtin_str = try ip.getOrPutString(gpa, io, pt.tid, "builtin", .no_embedded_nulls);
const builtin_nav = zcu.namespacePtr(std_namespace).pub_decls.getKeyAdapted(builtin_str, Zcu.Namespace.NameAdapter{ .zcu = zcu }) orelse
@panic("lib/std.zig is corrupt and missing 'builtin'");
try pt.ensureNavValUpToDate(builtin_nav);
@@ -857,8 +862,10 @@ pub fn ensureComptimeUnitUpToDate(pt: Zcu.PerThread, cu_id: InternPool.ComptimeU
/// to `transitive_failed_analysis` if necessary.
fn analyzeComptimeUnit(pt: Zcu.PerThread, cu_id: InternPool.ComptimeUnit.Id) Zcu.CompileError!void {
const zcu = pt.zcu;
- const gpa = zcu.gpa;
const ip = &zcu.intern_pool;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const anal_unit: AnalUnit = .wrap(.{ .@"comptime" = cu_id });
const comptime_unit = ip.getComptimeUnit(cu_id);
@@ -909,7 +916,7 @@ fn analyzeComptimeUnit(pt: Zcu.PerThread, cu_id: InternPool.ComptimeUnit.Id) Zcu
.r = .{ .simple = .comptime_keyword },
} },
.src_base_inst = comptime_unit.zir_index,
- .type_name_ctx = try ip.getOrPutStringFmt(gpa, pt.tid, "{f}.comptime", .{
+ .type_name_ctx = try ip.getOrPutStringFmt(gpa, io, pt.tid, "{f}.comptime", .{
Type.fromInterned(zcu.namespacePtr(comptime_unit.namespace).owner_type).containerTypeName(ip).fmt(ip),
}, .no_embedded_nulls),
};
@@ -1087,8 +1094,10 @@ pub fn ensureNavValUpToDate(pt: Zcu.PerThread, nav_id: InternPool.Nav.Index) Zcu
fn analyzeNavVal(pt: Zcu.PerThread, nav_id: InternPool.Nav.Index) Zcu.CompileError!struct { val_changed: bool } {
const zcu = pt.zcu;
- const gpa = zcu.gpa;
const ip = &zcu.intern_pool;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const anal_unit: AnalUnit = .wrap(.{ .nav_val = nav_id });
const old_nav = ip.getNav(nav_id);
@@ -1253,7 +1262,7 @@ fn analyzeNavVal(pt: Zcu.PerThread, nav_id: InternPool.Nav.Index) Zcu.CompileErr
break :val .fromInterned(try pt.getExtern(.{
.name = old_nav.name,
.ty = nav_ty.toIntern(),
- .lib_name = try ip.getOrPutStringOpt(gpa, pt.tid, lib_name, .no_embedded_nulls),
+ .lib_name = try ip.getOrPutStringOpt(gpa, io, pt.tid, lib_name, .no_embedded_nulls),
.is_threadlocal = zir_decl.is_threadlocal,
.linkage = .strong,
.visibility = .default,
@@ -1310,7 +1319,7 @@ fn analyzeNavVal(pt: Zcu.PerThread, nav_id: InternPool.Nav.Index) Zcu.CompileErr
}
}
- ip.resolveNavValue(nav_id, .{
+ ip.resolveNavValue(io, nav_id, .{
.val = nav_val.toIntern(),
.is_const = is_const,
.alignment = modifiers.alignment,
@@ -1327,7 +1336,7 @@ fn analyzeNavVal(pt: Zcu.PerThread, nav_id: InternPool.Nav.Index) Zcu.CompileErr
if (zir_decl.linkage == .@"export") {
const export_src = block.src(.{ .token_offset = @enumFromInt(@intFromBool(zir_decl.is_pub)) });
const name_slice = zir.nullTerminatedString(zir_decl.name);
- const name_ip = try ip.getOrPutString(gpa, pt.tid, name_slice, .no_embedded_nulls);
+ const name_ip = try ip.getOrPutString(gpa, io, pt.tid, name_slice, .no_embedded_nulls);
try sema.analyzeExport(&block, export_src, .{ .name = name_ip }, nav_id);
}
@@ -1472,7 +1481,9 @@ pub fn ensureNavTypeUpToDate(pt: Zcu.PerThread, nav_id: InternPool.Nav.Index) Zc
fn analyzeNavType(pt: Zcu.PerThread, nav_id: InternPool.Nav.Index) Zcu.CompileError!struct { type_changed: bool } {
const zcu = pt.zcu;
- const gpa = zcu.gpa;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const ip = &zcu.intern_pool;
const anal_unit: AnalUnit = .wrap(.{ .nav_ty = nav_id });
@@ -1579,7 +1590,7 @@ fn analyzeNavType(pt: Zcu.PerThread, nav_id: InternPool.Nav.Index) Zcu.CompileEr
if (!changed) return .{ .type_changed = false };
- ip.resolveNavType(nav_id, .{
+ ip.resolveNavType(io, nav_id, .{
.type = resolved_ty.toIntern(),
.is_const = is_const,
.alignment = modifiers.alignment,
@@ -1775,6 +1786,7 @@ fn createFileRootStruct(
) Allocator.Error!InternPool.Index {
const zcu = pt.zcu;
const gpa = zcu.gpa;
+ const io = zcu.comp.io;
const ip = &zcu.intern_pool;
const file = zcu.fileByIndex(file_index);
const extended = file.zir.?.instructions.items(.data)[@intFromEnum(Zir.Inst.Index.main_struct_inst)].extended;
@@ -1797,11 +1809,11 @@ fn createFileRootStruct(
const decls = file.zir.?.bodySlice(extra_index, decls_len);
extra_index += decls_len;
- const tracked_inst = try ip.trackZir(gpa, pt.tid, .{
+ const tracked_inst = try ip.trackZir(gpa, io, pt.tid, .{
.file = file_index,
.inst = .main_struct_inst,
});
- const wip_ty = switch (try ip.getStructType(gpa, pt.tid, .{
+ const wip_ty = switch (try ip.getStructType(gpa, io, pt.tid, .{
.layout = .auto,
.fields_len = fields_len,
.known_non_opv = small.known_non_opv,
@@ -1916,7 +1928,9 @@ pub fn discoverImport(
},
} {
const zcu = pt.zcu;
- const gpa = zcu.gpa;
+ const comp = zcu.comp;
+ const io = comp.io;
+ const gpa = comp.gpa;
if (!mem.endsWith(u8, import_string, ".zig") and !mem.endsWith(u8, import_string, ".zon")) {
return .module;
@@ -1926,8 +1940,8 @@ pub fn discoverImport(
errdefer new_path.deinit(gpa);
// We're about to do a GOP on `import_table`, so we need the mutex.
- zcu.comp.mutex.lock();
- defer zcu.comp.mutex.unlock();
+ comp.mutex.lockUncancelable(io);
+ defer comp.mutex.unlock(io);
const gop = try zcu.import_table.getOrPutAdapted(gpa, new_path, Zcu.ImportTableAdapter{ .zcu = zcu });
errdefer _ = zcu.import_table.pop();
@@ -1942,7 +1956,7 @@ pub fn discoverImport(
const new_file = try gpa.create(Zcu.File);
errdefer gpa.destroy(new_file);
- const new_file_index = try zcu.intern_pool.createFile(gpa, pt.tid, .{
+ const new_file_index = try zcu.intern_pool.createFile(gpa, io, pt.tid, .{
.bin_digest = new_path.digest(),
.file = new_file,
.root_type = .none,
@@ -2027,7 +2041,9 @@ pub fn populateModuleRootTable(pt: Zcu.PerThread) error{
IllegalZigImport,
}!void {
const zcu = pt.zcu;
- const gpa = zcu.gpa;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
// We'll initially add [mod, undefined] pairs, and when we reach the pair while
// iterating, rewrite the undefined value.
@@ -2085,7 +2101,7 @@ pub fn populateModuleRootTable(pt: Zcu.PerThread) error{
const new_file = try gpa.create(Zcu.File);
errdefer gpa.destroy(new_file);
- const new_file_index = try zcu.intern_pool.createFile(gpa, pt.tid, .{
+ const new_file_index = try zcu.intern_pool.createFile(gpa, io, pt.tid, .{
.bin_digest = path.digest(),
.file = new_file,
.root_type = .none,
@@ -2291,7 +2307,8 @@ pub fn computeAliveFiles(pt: Zcu.PerThread) Allocator.Error!bool {
pub fn updateBuiltinModule(pt: Zcu.PerThread, opts: Builtin) Allocator.Error!void {
const zcu = pt.zcu;
const comp = zcu.comp;
- const gpa = zcu.gpa;
+ const gpa = comp.gpa;
+ const io = comp.io;
const gop = try zcu.builtin_modules.getOrPut(gpa, opts.hash());
if (gop.found_existing) return; // the `File` is up-to-date
@@ -2330,7 +2347,7 @@ pub fn updateBuiltinModule(pt: Zcu.PerThread, opts: Builtin) Allocator.Error!voi
.zoir_invalidated = false,
};
- const file_index = try zcu.intern_pool.createFile(gpa, pt.tid, .{
+ const file_index = try zcu.intern_pool.createFile(gpa, io, pt.tid, .{
.bin_digest = path.digest(),
.file = file,
.root_type = .none,
@@ -2469,7 +2486,7 @@ fn updateEmbedFileInner(
// The loaded bytes of the file, including a sentinel 0 byte.
const ip_str: InternPool.String = str: {
- const string_bytes = ip.getLocal(tid).getMutableStringBytes(gpa);
+ const string_bytes = ip.getLocal(tid).getMutableStringBytes(gpa, io);
const old_len = string_bytes.mutate.len;
errdefer string_bytes.shrinkRetainingCapacity(old_len);
const bytes = (try string_bytes.addManyAsSlice(size_plus_one))[0];
@@ -2480,7 +2497,7 @@ fn updateEmbedFileInner(
error.EndOfStream => return error.UnexpectedEof,
};
bytes[size] = 0;
- break :str try ip.getOrPutTrailingString(gpa, tid, @intCast(bytes.len), .maybe_embedded_nulls);
+ break :str try ip.getOrPutTrailingString(gpa, io, tid, @intCast(bytes.len), .maybe_embedded_nulls);
};
if (ip_str_out) |p| p.* = ip_str;
@@ -2516,7 +2533,8 @@ fn newEmbedFile(
) !*Zcu.EmbedFile {
const zcu = pt.zcu;
const comp = zcu.comp;
- const gpa = zcu.gpa;
+ const io = comp.io;
+ const gpa = comp.gpa;
const ip = &zcu.intern_pool;
const new_file = try gpa.create(Zcu.EmbedFile);
@@ -2549,8 +2567,8 @@ fn newEmbedFile(
const path_str = try path.toAbsolute(comp.dirs, gpa);
defer gpa.free(path_str);
- whole.cache_manifest_mutex.lock();
- defer whole.cache_manifest_mutex.unlock();
+ try whole.cache_manifest_mutex.lock(io);
+ defer whole.cache_manifest_mutex.unlock(io);
man.addFilePostContents(path_str, contents, new_file.stat) catch |err| switch (err) {
error.Unexpected => unreachable,
@@ -2647,13 +2665,15 @@ const ScanDeclIter = struct {
fn avoidNameConflict(iter: *ScanDeclIter, comptime fmt: []const u8, args: anytype) !InternPool.NullTerminatedString {
const pt = iter.pt;
- const gpa = pt.zcu.gpa;
const ip = &pt.zcu.intern_pool;
- var name = try ip.getOrPutStringFmt(gpa, pt.tid, fmt, args, .no_embedded_nulls);
+ const comp = pt.zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
+ var name = try ip.getOrPutStringFmt(gpa, io, pt.tid, fmt, args, .no_embedded_nulls);
var gop = try iter.seen_decls.getOrPut(gpa, name);
var next_suffix: u32 = 0;
while (gop.found_existing) {
- name = try ip.getOrPutStringFmt(gpa, pt.tid, "{f}_{d}", .{ name.fmt(ip), next_suffix }, .no_embedded_nulls);
+ name = try ip.getOrPutStringFmt(gpa, io, pt.tid, "{f}_{d}", .{ name.fmt(ip), next_suffix }, .no_embedded_nulls);
gop = try iter.seen_decls.getOrPut(gpa, name);
next_suffix += 1;
}
@@ -2669,7 +2689,8 @@ const ScanDeclIter = struct {
const comp = zcu.comp;
const namespace_index = iter.namespace_index;
const namespace = zcu.namespacePtr(namespace_index);
- const gpa = zcu.gpa;
+ const gpa = comp.gpa;
+ const io = comp.io;
const file = namespace.fileScope(zcu);
const zir = file.zir.?;
const ip = &zcu.intern_pool;
@@ -2697,6 +2718,7 @@ const ScanDeclIter = struct {
if (iter.pass != .named) return;
const name = try ip.getOrPutString(
gpa,
+ io,
pt.tid,
zir.nullTerminatedString(decl.name),
.no_embedded_nulls,
@@ -2706,7 +2728,7 @@ const ScanDeclIter = struct {
},
};
- const tracked_inst = try ip.trackZir(gpa, pt.tid, .{
+ const tracked_inst = try ip.trackZir(gpa, io, pt.tid, .{
.file = namespace.file_scope,
.inst = decl_inst,
});
@@ -2718,7 +2740,7 @@ const ScanDeclIter = struct {
const cu = if (existing_unit) |eu|
eu.unwrap().@"comptime"
else
- try ip.createComptimeUnit(gpa, pt.tid, tracked_inst, namespace_index);
+ try ip.createComptimeUnit(gpa, io, pt.tid, tracked_inst, namespace_index);
const unit: AnalUnit = .wrap(.{ .@"comptime" = cu });
@@ -2737,9 +2759,9 @@ const ScanDeclIter = struct {
},
else => unit: {
const name = maybe_name.unwrap().?;
- const fqn = try namespace.internFullyQualifiedName(ip, gpa, pt.tid, name);
+ const fqn = try namespace.internFullyQualifiedName(ip, gpa, io, pt.tid, name);
const nav = if (existing_unit) |eu| eu.unwrap().nav_val else nav: {
- const nav = try ip.createDeclNav(gpa, pt.tid, name, fqn, tracked_inst, namespace_index);
+ const nav = try ip.createDeclNav(gpa, io, pt.tid, name, fqn, tracked_inst, namespace_index);
if (zcu.comp.debugIncremental()) try zcu.incremental_debug_state.newNav(zcu, nav);
break :nav nav;
};
@@ -2798,7 +2820,9 @@ fn analyzeFnBodyInner(pt: Zcu.PerThread, func_index: InternPool.Index) Zcu.SemaE
defer tracy.end();
const zcu = pt.zcu;
- const gpa = zcu.gpa;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const ip = &zcu.intern_pool;
const anal_unit = AnalUnit.wrap(.{ .func = func_index });
@@ -2810,9 +2834,9 @@ fn analyzeFnBodyInner(pt: Zcu.PerThread, func_index: InternPool.Index) Zcu.SemaE
try zcu.analysis_in_progress.putNoClobber(gpa, anal_unit, {});
errdefer _ = zcu.analysis_in_progress.swapRemove(anal_unit);
- func.setAnalyzed(ip);
+ func.setAnalyzed(ip, io);
if (func.analysisUnordered(ip).inferred_error_set) {
- func.setResolvedErrorSet(ip, .none);
+ func.setResolvedErrorSet(ip, io, .none);
}
if (zcu.comp.time_report) |*tr| {
@@ -2872,7 +2896,7 @@ fn analyzeFnBodyInner(pt: Zcu.PerThread, func_index: InternPool.Index) Zcu.SemaE
}
// reset in case calls to errorable functions are removed.
- ip.funcSetHasErrorTrace(func_index, fn_ty_info.cc == .auto);
+ ip.funcSetHasErrorTrace(io, func_index, fn_ty_info.cc == .auto);
// First few indexes of extra are reserved and set at the end.
const reserved_count = @typeInfo(Air.ExtraIndex).@"enum".fields.len;
@@ -2971,7 +2995,7 @@ fn analyzeFnBodyInner(pt: Zcu.PerThread, func_index: InternPool.Index) Zcu.SemaE
});
}
- func.setBranchHint(ip, sema.branch_hint orelse .none);
+ func.setBranchHint(ip, io, sema.branch_hint orelse .none);
if (zcu.comp.config.any_error_tracing and func.analysisUnordered(ip).has_error_trace and fn_ty_info.cc != .auto) {
// We're using an error trace, but didn't start out with one from the caller.
@@ -3005,7 +3029,7 @@ fn analyzeFnBodyInner(pt: Zcu.PerThread, func_index: InternPool.Index) Zcu.SemaE
else => |e| return e,
};
assert(ies.resolved != .none);
- func.setResolvedErrorSet(ip, ies.resolved);
+ func.setResolvedErrorSet(ip, io, ies.resolved);
}
assert(zcu.analysis_in_progress.swapRemove(anal_unit));
@@ -3036,7 +3060,8 @@ fn analyzeFnBodyInner(pt: Zcu.PerThread, func_index: InternPool.Index) Zcu.SemaE
}
pub fn createNamespace(pt: Zcu.PerThread, initialization: Zcu.Namespace) !Zcu.Namespace.Index {
- return pt.zcu.intern_pool.createNamespace(pt.zcu.gpa, pt.tid, initialization);
+ const comp = pt.zcu.comp;
+ return pt.zcu.intern_pool.createNamespace(comp.gpa, comp.io, pt.tid, initialization);
}
pub fn destroyNamespace(pt: Zcu.PerThread, namespace_index: Zcu.Namespace.Index) void {
@@ -3047,11 +3072,15 @@ pub fn getErrorValue(
pt: Zcu.PerThread,
name: InternPool.NullTerminatedString,
) Allocator.Error!Zcu.ErrorInt {
- return pt.zcu.intern_pool.getErrorValue(pt.zcu.gpa, pt.tid, name);
+ const comp = pt.zcu.comp;
+ return pt.zcu.intern_pool.getErrorValue(comp.gpa, comp.io, pt.tid, name);
}
pub fn getErrorValueFromSlice(pt: Zcu.PerThread, name: []const u8) Allocator.Error!Zcu.ErrorInt {
- return pt.getErrorValue(try pt.zcu.intern_pool.getOrPutString(pt.zcu.gpa, name));
+ const comp = pt.zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
+ return pt.getErrorValue(try pt.zcu.intern_pool.getOrPutString(gpa, io, name));
}
/// Removes any entry from `Zcu.failed_files` associated with `file`. Acquires `Compilation.mutex` as needed.
@@ -3078,8 +3107,10 @@ fn lockAndClearFileCompileError(pt: Zcu.PerThread, file_index: Zcu.File.Index, f
return;
}
- pt.zcu.comp.mutex.lock();
- defer pt.zcu.comp.mutex.unlock();
+ const comp = pt.zcu.comp;
+ const io = comp.io;
+ comp.mutex.lockUncancelable(io);
+ defer comp.mutex.unlock(io);
if (pt.zcu.failed_files.fetchSwapRemove(file_index)) |kv| {
assert(maybe_has_error); // the runtime safety case above
if (kv.value) |msg| pt.zcu.gpa.free(msg); // delete previous error message
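
The locking pattern in the hunk above recurs throughout this diff: `std.Io.Mutex` takes an explicit `Io` handle, and each call site chooses between the cancelable `lock` and the infallible `lockUncancelable`. A minimal sketch of the two shapes, using only the API visible at these call sites:

const std = @import("std");

fn withLock(io: std.Io, mutex: *std.Io.Mutex) std.Io.Cancelable!void {
    try mutex.lock(io); // cancelable: may fail with error.Canceled
    defer mutex.unlock(io);
    // ... critical section ...
}

fn withLockUncancelable(io: std.Io, mutex: *std.Io.Mutex) void {
    // Blocks through cancellation; used on cleanup and error-reporting
    // paths that must run even when the surrounding task is canceled.
    mutex.lockUncancelable(io);
    defer mutex.unlock(io);
    // ... critical section ...
}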
@@ -3266,7 +3297,9 @@ fn processExportsInner(
pub fn populateTestFunctions(pt: Zcu.PerThread) Allocator.Error!void {
const zcu = pt.zcu;
- const gpa = zcu.gpa;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const ip = &zcu.intern_pool;
// Our job is to correctly set the value of the `test_functions` declaration if it has been
@@ -3284,7 +3317,7 @@ pub fn populateTestFunctions(pt: Zcu.PerThread) Allocator.Error!void {
const builtin_namespace = Type.fromInterned(builtin_root_type).getNamespace(zcu).unwrap().?;
// We know that the namespace has a `test_functions`...
const nav_index = zcu.namespacePtr(builtin_namespace).pub_decls.getKeyAdapted(
- try ip.getOrPutString(gpa, pt.tid, "test_functions", .no_embedded_nulls),
+ try ip.getOrPutString(gpa, io, pt.tid, "test_functions", .no_embedded_nulls),
Zcu.Namespace.NameAdapter{ .zcu = zcu },
).?;
// ...but it might not be populated, so let's check that!
@@ -3392,7 +3425,7 @@ pub fn populateTestFunctions(pt: Zcu.PerThread) Allocator.Error!void {
} }),
.len = (try pt.intValue(Type.usize, zcu.test_functions.count())).toIntern(),
} });
- ip.mutateVarInit(test_fns_val.toIntern(), new_init);
+ ip.mutateVarInit(io, test_fns_val.toIntern(), new_init);
}
// The linker thread is not running, so we actually need to dispatch this task directly.
@import("../link.zig").linkTestFunctionsNav(pt, nav_index);
@@ -3407,7 +3440,9 @@ pub fn reportRetryableFileError(
args: anytype,
) error{OutOfMemory}!void {
const zcu = pt.zcu;
- const gpa = zcu.gpa;
+ const comp = zcu.comp;
+ const io = comp.io;
+ const gpa = comp.gpa;
const file = zcu.fileByIndex(file_index);
@@ -3417,8 +3452,8 @@ pub fn reportRetryableFileError(
errdefer gpa.free(msg);
const old_msg: ?[]u8 = old_msg: {
- zcu.comp.mutex.lock();
- defer zcu.comp.mutex.unlock();
+ comp.mutex.lockUncancelable(io);
+ defer comp.mutex.unlock(io);
const gop = try zcu.failed_files.getOrPut(gpa, file_index);
const old: ?[]u8 = if (gop.found_existing) old: {
@@ -3433,12 +3468,8 @@ pub fn reportRetryableFileError(
/// Shortcut for calling `intern_pool.get`.
pub fn intern(pt: Zcu.PerThread, key: InternPool.Key) Allocator.Error!InternPool.Index {
- return pt.zcu.intern_pool.get(pt.zcu.gpa, pt.tid, key);
-}
-
-/// Shortcut for calling `intern_pool.getUnion`.
-pub fn internUnion(pt: Zcu.PerThread, un: InternPool.Key.Union) Allocator.Error!InternPool.Index {
- return pt.zcu.intern_pool.getUnion(pt.zcu.gpa, pt.tid, un);
+ const comp = pt.zcu.comp;
+ return pt.zcu.intern_pool.get(comp.gpa, comp.io, pt.tid, key);
}
/// Essentially a shortcut for calling `intern_pool.getCoerced`.
@@ -3446,6 +3477,9 @@ pub fn internUnion(pt: Zcu.PerThread, un: InternPool.Key.Union) Allocator.Error!
/// this because it requires potentially pushing to the job queue.
pub fn getCoerced(pt: Zcu.PerThread, val: Value, new_ty: Type) Allocator.Error!Value {
const ip = &pt.zcu.intern_pool;
+ const comp = pt.zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
switch (ip.indexToKey(val.toIntern())) {
.@"extern" => |e| {
const coerced = try pt.getExtern(.{
@@ -3468,7 +3502,7 @@ pub fn getCoerced(pt: Zcu.PerThread, val: Value, new_ty: Type) Allocator.Error!V
},
else => {},
}
- return Value.fromInterned(try ip.getCoerced(pt.zcu.gpa, pt.tid, val.toIntern(), new_ty.toIntern()));
+ return Value.fromInterned(try ip.getCoerced(gpa, io, pt.tid, val.toIntern(), new_ty.toIntern()));
}
pub fn intType(pt: Zcu.PerThread, signedness: std.builtin.Signedness, bits: u16) Allocator.Error!Type {
@@ -3566,7 +3600,8 @@ pub fn adjustPtrTypeChild(pt: Zcu.PerThread, ptr_ty: Type, new_child: Type) Allo
}
pub fn funcType(pt: Zcu.PerThread, key: InternPool.GetFuncTypeKey) Allocator.Error!Type {
- return Type.fromInterned(try pt.zcu.intern_pool.getFuncType(pt.zcu.gpa, pt.tid, key));
+ const comp = pt.zcu.comp;
+ return .fromInterned(try pt.zcu.intern_pool.getFuncType(comp.gpa, comp.io, pt.tid, key));
}
/// Use this for `anyframe->T` only.
@@ -3584,7 +3619,8 @@ pub fn errorUnionType(pt: Zcu.PerThread, error_set_ty: Type, payload_ty: Type) A
pub fn singleErrorSetType(pt: Zcu.PerThread, name: InternPool.NullTerminatedString) Allocator.Error!Type {
const names: *const [1]InternPool.NullTerminatedString = &name;
- return Type.fromInterned(try pt.zcu.intern_pool.getErrorSetType(pt.zcu.gpa, pt.tid, names));
+ const comp = pt.zcu.comp;
+ return Type.fromInterned(try pt.zcu.intern_pool.getErrorSetType(comp.gpa, comp.io, pt.tid, names));
}
/// Sorts `names` in place.
@@ -3598,7 +3634,8 @@ pub fn errorSetFromUnsortedNames(
{},
InternPool.NullTerminatedString.indexLessThan,
);
- const new_ty = try pt.zcu.intern_pool.getErrorSetType(pt.zcu.gpa, pt.tid, names);
+ const comp = pt.zcu.comp;
+ const new_ty = try pt.zcu.intern_pool.getErrorSetType(comp.gpa, comp.io, pt.tid, names);
return Type.fromInterned(new_ty);
}
@@ -3709,9 +3746,17 @@ pub fn intValue_i64(pt: Zcu.PerThread, ty: Type, x: i64) Allocator.Error!Value {
} }));
}
+/// Shortcut for calling `intern_pool.getUnion`.
+/// TODO: remove either this or `unionValue`.
+pub fn internUnion(pt: Zcu.PerThread, un: InternPool.Key.Union) Allocator.Error!InternPool.Index {
+ const comp = pt.zcu.comp;
+ return pt.zcu.intern_pool.getUnion(comp.gpa, comp.io, pt.tid, un);
+}
+
+/// TODO: remove either this or `internUnion`.
pub fn unionValue(pt: Zcu.PerThread, union_ty: Type, tag: Value, val: Value) Allocator.Error!Value {
- const zcu = pt.zcu;
- return Value.fromInterned(try zcu.intern_pool.getUnion(zcu.gpa, pt.tid, .{
+ const comp = pt.zcu.comp;
+ return Value.fromInterned(try pt.zcu.intern_pool.getUnion(comp.gpa, comp.io, pt.tid, .{
.ty = union_ty.toIntern(),
.tag = tag.toIntern(),
.val = val.toIntern(),
@@ -3771,12 +3816,12 @@ pub fn nullValue(pt: Zcu.PerThread, opt_ty: Type) Allocator.Error!Value {
/// `ty` is an integer or a vector of integers.
pub fn overflowArithmeticTupleType(pt: Zcu.PerThread, ty: Type) !Type {
const zcu = pt.zcu;
- const ip = &zcu.intern_pool;
+ const comp = zcu.comp;
const ov_ty: Type = if (ty.zigTypeTag(zcu) == .vector) try pt.vectorType(.{
.len = ty.vectorLen(zcu),
.child = .u1_type,
}) else .u1;
- const tuple_ty = try ip.getTupleType(zcu.gpa, pt.tid, .{
+ const tuple_ty = try zcu.intern_pool.getTupleType(comp.gpa, comp.io, pt.tid, .{
.types = &.{ ty.toIntern(), ov_ty.toIntern() },
.values = &.{ .none, .none },
});
@@ -3872,12 +3917,14 @@ pub fn navPtrType(pt: Zcu.PerThread, nav_id: InternPool.Nav.Index) Allocator.Err
/// If necessary, the new `Nav` is queued for codegen.
/// `key.owner_nav` is ignored and may be `undefined`.
pub fn getExtern(pt: Zcu.PerThread, key: InternPool.Key.Extern) Allocator.Error!InternPool.Index {
- const result = try pt.zcu.intern_pool.getExtern(pt.zcu.gpa, pt.tid, key);
+ const zcu = pt.zcu;
+ const comp = zcu.comp;
+ const result = try zcu.intern_pool.getExtern(comp.gpa, comp.io, pt.tid, key);
if (result.new_nav.unwrap()) |nav| {
// This job depends on any resolve_type_fully jobs queued up before it.
- pt.zcu.comp.link_prog_node.increaseEstimatedTotalItems(1);
- try pt.zcu.comp.queueJob(.{ .link_nav = nav });
- if (pt.zcu.comp.debugIncremental()) try pt.zcu.incremental_debug_state.newNav(pt.zcu, nav);
+ comp.link_prog_node.increaseEstimatedTotalItems(1);
+ try comp.queueJob(.{ .link_nav = nav });
+ if (comp.debugIncremental()) try zcu.incremental_debug_state.newNav(zcu, nav);
}
return result.index;
}
@@ -3966,7 +4013,9 @@ fn recreateStructType(
key: InternPool.Key.NamespaceType.Declared,
) Allocator.Error!InternPool.Index {
const zcu = pt.zcu;
- const gpa = zcu.gpa;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const ip = &zcu.intern_pool;
const inst_info = key.zir_index.resolveFull(ip).?;
@@ -3995,7 +4044,7 @@ fn recreateStructType(
const struct_obj = ip.loadStructType(old_ty);
- const wip_ty = switch (try ip.getStructType(gpa, pt.tid, .{
+ const wip_ty = switch (try ip.getStructType(gpa, io, pt.tid, .{
.layout = small.layout,
.fields_len = fields_len,
.known_non_opv = small.known_non_opv,
@@ -4042,7 +4091,9 @@ fn recreateUnionType(
key: InternPool.Key.NamespaceType.Declared,
) Allocator.Error!InternPool.Index {
const zcu = pt.zcu;
- const gpa = zcu.gpa;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const ip = &zcu.intern_pool;
const inst_info = key.zir_index.resolveFull(ip).?;
@@ -4075,7 +4126,7 @@ fn recreateUnionType(
const namespace_index = union_obj.namespace;
- const wip_ty = switch (try ip.getUnionType(gpa, pt.tid, .{
+ const wip_ty = switch (try ip.getUnionType(gpa, io, pt.tid, .{
.flags = .{
.layout = small.layout,
.status = .none,
@@ -4133,7 +4184,9 @@ fn recreateEnumType(
key: InternPool.Key.NamespaceType.Declared,
) (Allocator.Error || Io.Cancelable)!InternPool.Index {
const zcu = pt.zcu;
- const gpa = zcu.gpa;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const ip = &zcu.intern_pool;
const inst_info = key.zir_index.resolveFull(ip).?;
@@ -4197,7 +4250,7 @@ fn recreateEnumType(
const namespace_index = enum_obj.namespace;
- const wip_ty = switch (try ip.getEnumType(gpa, pt.tid, .{
+ const wip_ty = switch (try ip.getEnumType(gpa, io, pt.tid, .{
.has_values = any_values,
.tag_mode = if (small.nonexhaustive)
.nonexhaustive
@@ -4404,7 +4457,7 @@ pub fn refValue(pt: Zcu.PerThread, val: InternPool.Index) Zcu.SemaError!InternPo
pub fn addDependency(pt: Zcu.PerThread, unit: AnalUnit, dependee: InternPool.Dependee) Allocator.Error!void {
const zcu = pt.zcu;
- const gpa = zcu.gpa;
+ const gpa = zcu.comp.gpa;
try zcu.intern_pool.addDependency(gpa, unit, dependee);
if (zcu.comp.debugIncremental()) {
const info = try zcu.incremental_debug_state.getUnitInfo(gpa, unit);
@@ -4412,50 +4465,38 @@ pub fn addDependency(pt: Zcu.PerThread, unit: AnalUnit, dependee: InternPool.Dep
}
}
-/// Performs code generation, which comes after `Sema` but before `link` in the pipeline.
-/// This part of the pipeline is self-contained/"pure", so can be run in parallel with most
-/// other code. This function is currently run either on the main thread, or on a separate
-/// codegen thread, depending on whether the backend supports `Zcu.Feature.separate_thread`.
-pub fn runCodegen(pt: Zcu.PerThread, func_index: InternPool.Index, air: *Air, out: *@import("../link.zig").ZcuTask.LinkFunc.SharedMir) void {
+pub const RunCodegenError = Io.Cancelable || error{AlreadyReported};
+
+/// Performs code generation, which comes after `Sema` but before `link` in the pipeline. This part
+/// of the pipeline is self-contained and can usually be run concurrently with other components.
+///
+/// This function is called asynchronously by `Zcu.CodegenTaskPool.start` and awaited by the linker.
+/// However, if the codegen backend does not support `Zcu.Feature.separate_thread`, then
+/// `Compilation.processOneJob` will immediately await the result of the linker task, meaning the
+/// pipeline becomes effectively single-threaded.
+pub fn runCodegen(pt: Zcu.PerThread, func_index: InternPool.Index, air: *Air) RunCodegenError!codegen.AnyMir {
const zcu = pt.zcu;
+ const comp = zcu.comp;
+ const io = comp.io;
crash_report.CodegenFunc.start(zcu, func_index);
defer crash_report.CodegenFunc.stop(func_index);
- var timer = zcu.comp.startTimer();
+ var timer = comp.startTimer();
- const success: bool = if (runCodegenInner(pt, func_index, air)) |mir| success: {
- out.value = mir;
- break :success true;
- } else |err| success: {
- switch (err) {
- error.OutOfMemory => zcu.comp.setAllocFailure(),
- error.CodegenFail => zcu.assertCodegenFailed(zcu.funcInfo(func_index).owner_nav),
- error.NoLinkFile => assert(zcu.comp.bin_file == null),
- error.BackendDoesNotProduceMir => switch (target_util.zigBackend(
- &zcu.root_mod.resolved_target.result,
- zcu.comp.config.use_llvm,
- )) {
- else => unreachable, // assertion failure
- .stage2_spirv,
- .stage2_llvm,
- => {},
- },
- }
- break :success false;
- };
+ const codegen_result = runCodegenInner(pt, func_index, air);
if (timer.finish()) |ns_codegen| report_time: {
const ip = &zcu.intern_pool;
const nav = ip.indexToKey(func_index).func.owner_nav;
const zir_decl = ip.getNav(nav).srcInst(ip);
- zcu.comp.mutex.lock();
- defer zcu.comp.mutex.unlock();
+ comp.mutex.lockUncancelable(io);
+ defer comp.mutex.unlock(io);
const tr = &zcu.comp.time_report.?;
tr.stats.cpu_ns_codegen += ns_codegen;
- const gop = tr.decl_codegen_ns.getOrPut(zcu.gpa, zir_decl) catch |err| switch (err) {
+ const gop = tr.decl_codegen_ns.getOrPut(comp.gpa, zir_decl) catch |err| switch (err) {
error.OutOfMemory => {
- zcu.comp.setAllocFailure();
+ comp.setAllocFailure();
break :report_time;
},
};
@@ -4463,14 +4504,29 @@ pub fn runCodegen(pt: Zcu.PerThread, func_index: InternPool.Index, air: *Air, ou
gop.value_ptr.* += ns_codegen;
}
- // release `out.value` with this store; synchronizes with acquire loads in `link`
- out.status.store(if (success) .ready else .failed, .release);
- zcu.comp.link_task_queue.mirReady(zcu.comp, func_index, out);
if (zcu.pending_codegen_jobs.rmw(.Sub, 1, .monotonic) == 1) {
// Decremented to 0, so all done.
zcu.codegen_prog_node.end();
zcu.codegen_prog_node = .none;
}
+
+ return codegen_result catch |err| {
+ switch (err) {
+ error.OutOfMemory => comp.setAllocFailure(),
+ error.CodegenFail => zcu.assertCodegenFailed(zcu.funcInfo(func_index).owner_nav),
+ error.NoLinkFile => assert(comp.bin_file == null),
+ error.BackendDoesNotProduceMir => switch (target_util.zigBackend(
+ &zcu.root_mod.resolved_target.result,
+ comp.config.use_llvm,
+ )) {
+ else => unreachable, // assertion failure
+ .stage2_spirv,
+ .stage2_llvm,
+ => {},
+ },
+ }
+ return error.AlreadyReported;
+ };
}
fn runCodegenInner(pt: Zcu.PerThread, func_index: InternPool.Index, air: *Air) error{
OutOfMemory,
@@ -4527,7 +4583,7 @@ fn runCodegenInner(pt: Zcu.PerThread, func_index: InternPool.Index, air: *Air) e
// "emit" step because LLVM does not support incremental linking. Our linker (LLD or self-hosted)
// will just see the ZCU object file which LLVM ultimately emits.
if (zcu.llvm_object) |llvm_object| {
- assert(pt.tid == .main); // LLVM has a lot of shared state
+ assert(zcu.pending_codegen_jobs.load(.monotonic) == 2); // only one codegen at a time (the count starts at 1, so a single in-flight job reads as 2)
try llvm_object.updateFunc(pt, func_index, air, &liveness);
return error.BackendDoesNotProduceMir;
}
@@ -4536,7 +4592,7 @@ fn runCodegenInner(pt: Zcu.PerThread, func_index: InternPool.Index, air: *Air) e
// Just like LLVM, the SPIR-V backend can't be multi-threaded due to SPIR-V design limitations.
if (lf.cast(.spirv)) |spirv_file| {
- assert(pt.tid == .main); // SPIR-V has a lot of shared state
+ assert(zcu.pending_codegen_jobs.load(.monotonic) == 2); // only one codegen at a time (the count starts at 1, so a single in-flight job reads as 2)
spirv_file.updateFunc(pt, func_index, air, &liveness) catch |err| {
switch (err) {
error.OutOfMemory => comp.link_diags.setAllocFailure(),
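
The doc comment on `runCodegen` above describes the new handshake: codegen is started asynchronously via `Zcu.CodegenTaskPool.start`, and the linker awaits its result instead of polling an atomic status. A condensed sketch of the consumer half, based on the `doZcuTask` hunk later in this diff (`linkOne` is a hypothetical name; `Zcu` is the in-tree type):

fn linkOne(zcu: *Zcu, io: std.Io, task: Zcu.CodegenTaskPool.Index) void {
    // Block until codegen for this function finishes (or fails/cancels).
    const func, var mir = task.wait(&zcu.codegen_task_pool, io) catch |err| switch (err) {
        // Failures were already reported through side channels in `runCodegen`,
        // so the link task just drops this function.
        error.Canceled, error.AlreadyReported => return,
    };
    defer mir.deinit(zcu);
    _ = func; // the real code passes these to `lf.updateFunc(pt, func, &mir)`
}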
diff --git a/src/codegen/spirv/CodeGen.zig b/src/codegen/spirv/CodeGen.zig
index c34dd96e25..bf285d0e0e 100644
--- a/src/codegen/spirv/CodeGen.zig
+++ b/src/codegen/spirv/CodeGen.zig
@@ -2270,6 +2270,9 @@ fn buildWideMul(
) !struct { Temporary, Temporary } {
const pt = cg.pt;
const zcu = cg.module.zcu;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const target = cg.module.zcu.getTarget();
const ip = &zcu.intern_pool;
@@ -2297,14 +2300,14 @@ fn buildWideMul(
};
for (0..ops) |i| {
- try cg.body.emit(cg.module.gpa, .OpIMul, .{
+ try cg.body.emit(gpa, .OpIMul, .{
.id_result_type = arith_op_ty_id,
.id_result = value_results.at(i),
.operand_1 = lhs_op.at(i),
.operand_2 = rhs_op.at(i),
});
- try cg.body.emit(cg.module.gpa, .OpExtInst, .{
+ try cg.body.emit(gpa, .OpExtInst, .{
.id_result_type = arith_op_ty_id,
.id_result = overflow_results.at(i),
.set = set,
@@ -2316,7 +2319,7 @@ fn buildWideMul(
.vulkan, .opengl => {
// Operations return a struct{T, T}
// where T is maybe vectorized.
- const op_result_ty: Type = .fromInterned(try ip.getTupleType(zcu.gpa, pt.tid, .{
+ const op_result_ty: Type = .fromInterned(try ip.getTupleType(gpa, io, pt.tid, .{
.types = &.{ arith_op_ty.toIntern(), arith_op_ty.toIntern() },
.values = &.{ .none, .none },
}));
@@ -2330,7 +2333,7 @@ fn buildWideMul(
for (0..ops) |i| {
const op_result = cg.module.allocId();
- try cg.body.emitRaw(cg.module.gpa, opcode, 4);
+ try cg.body.emitRaw(gpa, opcode, 4);
cg.body.writeOperand(Id, op_result_ty_id);
cg.body.writeOperand(Id, op_result);
cg.body.writeOperand(Id, lhs_op.at(i));
@@ -2340,14 +2343,14 @@ fn buildWideMul(
// Temporary to deal with the fact that these are structs eventually,
// but for now, take the struct apart and return two separate vectors.
- try cg.body.emit(cg.module.gpa, .OpCompositeExtract, .{
+ try cg.body.emit(gpa, .OpCompositeExtract, .{
.id_result_type = arith_op_ty_id,
.id_result = value_results.at(i),
.composite = op_result,
.indexes = &.{0},
});
- try cg.body.emit(cg.module.gpa, .OpCompositeExtract, .{
+ try cg.body.emit(gpa, .OpCompositeExtract, .{
.id_result_type = arith_op_ty_id,
.id_result = overflow_results.at(i),
.composite = op_result,
diff --git a/src/codegen/x86_64/CodeGen.zig b/src/codegen/x86_64/CodeGen.zig
index 7e8e78334a..382c9e9c3c 100644
--- a/src/codegen/x86_64/CodeGen.zig
+++ b/src/codegen/x86_64/CodeGen.zig
@@ -180204,6 +180204,7 @@ fn airSplat(self: *CodeGen, inst: Air.Inst.Index) !void {
fn airSelect(self: *CodeGen, inst: Air.Inst.Index) !void {
const pt = self.pt;
const zcu = pt.zcu;
+ const io = zcu.comp.io;
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = self.air.extraData(Air.Bin, pl_op.payload).data;
const ty = self.typeOfIndex(inst);
@@ -180477,7 +180478,7 @@ fn airSelect(self: *CodeGen, inst: Air.Inst.Index) !void {
for (mask_elems, 0..) |*elem, bit| elem.* = @intCast(bit / elem_bits);
const mask_mcv = try self.lowerValue(.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = mask_ty.toIntern(),
- .storage = .{ .bytes = try zcu.intern_pool.getOrPutString(zcu.gpa, pt.tid, mask_elems, .maybe_embedded_nulls) },
+ .storage = .{ .bytes = try zcu.intern_pool.getOrPutString(zcu.gpa, io, pt.tid, mask_elems, .maybe_embedded_nulls) },
} })));
const mask_mem: Memory = .{
.base = .{ .reg = try self.copyToTmpRegister(.usize, mask_mcv.address()) },
@@ -188476,6 +188477,7 @@ const Select = struct {
fn create(spec: TempSpec, s: *const Select) InnerError!struct { Temp, bool } {
const cg = s.cg;
const pt = cg.pt;
+ const io = pt.zcu.comp.io;
return switch (spec.kind) {
.unused => .{ undefined, false },
.any => .{ try cg.tempAlloc(spec.type), true },
@@ -188693,7 +188695,7 @@ const Select = struct {
};
return .{ try cg.tempMemFromValue(.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = spec.type.toIntern(),
- .storage = .{ .bytes = try zcu.intern_pool.getOrPutString(zcu.gpa, pt.tid, elems, .maybe_embedded_nulls) },
+ .storage = .{ .bytes = try zcu.intern_pool.getOrPutString(zcu.gpa, io, pt.tid, elems, .maybe_embedded_nulls) },
} }))), true };
},
.pshufb_trunc_mem => |trunc_spec| {
@@ -188720,7 +188722,7 @@ const Select = struct {
};
return .{ try cg.tempMemFromValue(.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = spec.type.toIntern(),
- .storage = .{ .bytes = try zcu.intern_pool.getOrPutString(zcu.gpa, pt.tid, elems, .maybe_embedded_nulls) },
+ .storage = .{ .bytes = try zcu.intern_pool.getOrPutString(zcu.gpa, io, pt.tid, elems, .maybe_embedded_nulls) },
} }))), true };
},
.pand_trunc_mem => |trunc_spec| {
@@ -188734,7 +188736,7 @@ const Select = struct {
while (index < elems.len) : (index += from_bytes) @memset(elems[index..][0..to_bytes], std.math.maxInt(u8));
return .{ try cg.tempMemFromValue(.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = spec.type.toIntern(),
- .storage = .{ .bytes = try zcu.intern_pool.getOrPutString(zcu.gpa, pt.tid, elems, .maybe_embedded_nulls) },
+ .storage = .{ .bytes = try zcu.intern_pool.getOrPutString(zcu.gpa, io, pt.tid, elems, .maybe_embedded_nulls) },
} }))), true };
},
.pand_mask_mem => |mask_spec| {
@@ -188753,7 +188755,7 @@ const Select = struct {
@memset(elems[mask_len..], invert_mask);
return .{ try cg.tempMemFromValue(.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = spec.type.toIntern(),
- .storage = .{ .bytes = try zcu.intern_pool.getOrPutString(zcu.gpa, pt.tid, elems, .maybe_embedded_nulls) },
+ .storage = .{ .bytes = try zcu.intern_pool.getOrPutString(zcu.gpa, io, pt.tid, elems, .maybe_embedded_nulls) },
} }))), true };
},
.ptest_mask_mem => |mask_ref| {
@@ -188778,7 +188780,7 @@ const Select = struct {
}
return .{ try cg.tempMemFromValue(.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = spec.type.toIntern(),
- .storage = .{ .bytes = try zcu.intern_pool.getOrPutString(zcu.gpa, pt.tid, elems, .maybe_embedded_nulls) },
+ .storage = .{ .bytes = try zcu.intern_pool.getOrPutString(zcu.gpa, io, pt.tid, elems, .maybe_embedded_nulls) },
} }))), true };
},
.pshufb_bswap_mem => |bswap_spec| {
@@ -188794,7 +188796,7 @@ const Select = struct {
};
return .{ try cg.tempMemFromValue(.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = spec.type.toIntern(),
- .storage = .{ .bytes = try zcu.intern_pool.getOrPutString(zcu.gpa, pt.tid, elems, .maybe_embedded_nulls) },
+ .storage = .{ .bytes = try zcu.intern_pool.getOrPutString(zcu.gpa, io, pt.tid, elems, .maybe_embedded_nulls) },
} }))), true };
},
.bits_mem => |direction| {
@@ -188808,7 +188810,7 @@ const Select = struct {
};
return .{ try cg.tempMemFromValue(.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = spec.type.toIntern(),
- .storage = .{ .bytes = try zcu.intern_pool.getOrPutString(zcu.gpa, pt.tid, elems, .maybe_embedded_nulls) },
+ .storage = .{ .bytes = try zcu.intern_pool.getOrPutString(zcu.gpa, io, pt.tid, elems, .maybe_embedded_nulls) },
} }))), true };
},
.splat_int_mem => |splat_spec| {
diff --git a/src/libs/freebsd.zig b/src/libs/freebsd.zig
index 47216c2a62..afeb5b3282 100644
--- a/src/libs/freebsd.zig
+++ b/src/libs/freebsd.zig
@@ -991,7 +991,8 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
});
}
-fn queueSharedObjects(comp: *Compilation, so_files: BuiltSharedObjects) void {
+fn queueSharedObjects(comp: *Compilation, so_files: BuiltSharedObjects) std.Io.Cancelable!void {
+ const io = comp.io;
const target = comp.getTarget();
const target_os_version = target.os.version_range.semver.min;
@@ -1002,8 +1003,8 @@ fn queueSharedObjects(comp: *Compilation, so_files: BuiltSharedObjects) void {
var task_buffer_i: usize = 0;
{
- comp.mutex.lock(); // protect comp.arena
- defer comp.mutex.unlock();
+ comp.mutex.lockUncancelable(io); // protect comp.arena
+ defer comp.mutex.unlock(io);
for (libs) |lib| {
if (lib.added_in) |add_in| {
@@ -1021,7 +1022,7 @@ fn queueSharedObjects(comp: *Compilation, so_files: BuiltSharedObjects) void {
}
}
- comp.queuePrelinkTasks(task_buffer[0..task_buffer_i]);
+ try comp.queuePrelinkTasks(task_buffer[0..task_buffer_i]);
}
fn buildSharedLib(
@@ -1094,8 +1095,8 @@ fn buildSharedLib(
var sub_create_diag: Compilation.CreateDiagnostic = undefined;
const sub_compilation = Compilation.create(comp.gpa, arena, io, &sub_create_diag, .{
+ .thread_limit = comp.thread_limit,
.dirs = comp.dirs.withoutLocalCache(),
- .thread_pool = comp.thread_pool,
.self_exe_path = comp.self_exe_path,
// Because we manually cache the whole set of objects, we don't cache the individual objects
// within it. In fact, we *can't* do that, because we need `emit_bin` to specify the path.
diff --git a/src/libs/glibc.zig b/src/libs/glibc.zig
index 76048239e0..64d0fdbeac 100644
--- a/src/libs/glibc.zig
+++ b/src/libs/glibc.zig
@@ -1135,7 +1135,8 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
});
}
-fn queueSharedObjects(comp: *Compilation, so_files: BuiltSharedObjects) void {
+fn queueSharedObjects(comp: *Compilation, so_files: BuiltSharedObjects) std.Io.Cancelable!void {
+ const io = comp.io;
const target_version = comp.getTarget().os.versionRange().gnuLibCVersion().?;
assert(comp.glibc_so_files == null);
@@ -1145,8 +1146,8 @@ fn queueSharedObjects(comp: *Compilation, so_files: BuiltSharedObjects) void {
var task_buffer_i: usize = 0;
{
- comp.mutex.lock(); // protect comp.arena
- defer comp.mutex.unlock();
+ comp.mutex.lockUncancelable(io); // protect comp.arena
+ defer comp.mutex.unlock(io);
for (libs) |lib| {
if (lib.removed_in) |rem_in| {
@@ -1163,7 +1164,7 @@ fn queueSharedObjects(comp: *Compilation, so_files: BuiltSharedObjects) void {
}
}
- comp.queuePrelinkTasks(task_buffer[0..task_buffer_i]);
+ try comp.queuePrelinkTasks(task_buffer[0..task_buffer_i]);
}
fn buildSharedLib(
@@ -1233,8 +1234,8 @@ fn buildSharedLib(
var sub_create_diag: Compilation.CreateDiagnostic = undefined;
const sub_compilation = Compilation.create(comp.gpa, arena, io, &sub_create_diag, .{
+ .thread_limit = comp.thread_limit,
.dirs = comp.dirs.withoutLocalCache(),
- .thread_pool = comp.thread_pool,
.self_exe_path = comp.self_exe_path,
// Because we manually cache the whole set of objects, we don't cache the individual objects
// within it. In fact, we *can't* do that, because we need `emit_bin` to specify the path.
diff --git a/src/libs/libcxx.zig b/src/libs/libcxx.zig
index 405a06ba59..d293a3b899 100644
--- a/src/libs/libcxx.zig
+++ b/src/libs/libcxx.zig
@@ -106,7 +106,7 @@ pub const BuildError = error{
OutOfMemory,
AlreadyReported,
ZigCompilerNotBuiltWithLLVMExtensions,
-};
+} || std.Io.Cancelable;
pub fn buildLibCxx(comp: *Compilation, prog_node: std.Progress.Node) BuildError!void {
if (!build_options.have_llvm) {
@@ -256,13 +256,13 @@ pub fn buildLibCxx(comp: *Compilation, prog_node: std.Progress.Node) BuildError!
var sub_create_diag: Compilation.CreateDiagnostic = undefined;
const sub_compilation = Compilation.create(comp.gpa, arena, io, &sub_create_diag, .{
+ .thread_limit = comp.thread_limit,
.dirs = comp.dirs.withoutLocalCache(),
.self_exe_path = comp.self_exe_path,
.cache_mode = .whole,
.config = config,
.root_mod = root_mod,
.root_name = root_name,
- .thread_pool = comp.thread_pool,
.libc_installation = comp.libc_installation,
.emit_bin = .yes_cache,
.c_source_files = c_source_files.items,
@@ -295,7 +295,7 @@ pub fn buildLibCxx(comp: *Compilation, prog_node: std.Progress.Node) BuildError!
assert(comp.libcxx_static_lib == null);
const crt_file = try sub_compilation.toCrtFile();
comp.libcxx_static_lib = crt_file;
- comp.queuePrelinkTaskMode(crt_file.full_object_path, &config);
+ try comp.queuePrelinkTaskMode(crt_file.full_object_path, &config);
}
pub fn buildLibCxxAbi(comp: *Compilation, prog_node: std.Progress.Node) BuildError!void {
@@ -449,13 +449,13 @@ pub fn buildLibCxxAbi(comp: *Compilation, prog_node: std.Progress.Node) BuildErr
var sub_create_diag: Compilation.CreateDiagnostic = undefined;
const sub_compilation = Compilation.create(comp.gpa, arena, io, &sub_create_diag, .{
+ .thread_limit = comp.thread_limit,
.dirs = comp.dirs.withoutLocalCache(),
.self_exe_path = comp.self_exe_path,
.cache_mode = .whole,
.config = config,
.root_mod = root_mod,
.root_name = root_name,
- .thread_pool = comp.thread_pool,
.libc_installation = comp.libc_installation,
.emit_bin = .yes_cache,
.c_source_files = c_source_files.items,
@@ -492,7 +492,7 @@ pub fn buildLibCxxAbi(comp: *Compilation, prog_node: std.Progress.Node) BuildErr
assert(comp.libcxxabi_static_lib == null);
const crt_file = try sub_compilation.toCrtFile();
comp.libcxxabi_static_lib = crt_file;
- comp.queuePrelinkTaskMode(crt_file.full_object_path, &config);
+ try comp.queuePrelinkTaskMode(crt_file.full_object_path, &config);
}
pub fn addCxxArgs(
diff --git a/src/libs/libtsan.zig b/src/libs/libtsan.zig
index 541d2bab3c..3dcbc132b1 100644
--- a/src/libs/libtsan.zig
+++ b/src/libs/libtsan.zig
@@ -11,7 +11,7 @@ pub const BuildError = error{
AlreadyReported,
ZigCompilerNotBuiltWithLLVMExtensions,
TSANUnsupportedCPUArchitecture,
-};
+} || std.Io.Cancelable;
pub fn buildTsan(comp: *Compilation, prog_node: std.Progress.Node) BuildError!void {
if (!build_options.have_llvm) {
@@ -279,8 +279,8 @@ pub fn buildTsan(comp: *Compilation, prog_node: std.Progress.Node) BuildError!vo
var sub_create_diag: Compilation.CreateDiagnostic = undefined;
const sub_compilation = Compilation.create(comp.gpa, arena, io, &sub_create_diag, .{
+ .thread_limit = comp.thread_limit,
.dirs = comp.dirs.withoutLocalCache(),
- .thread_pool = comp.thread_pool,
.self_exe_path = comp.self_exe_path,
.cache_mode = .whole,
.config = config,
@@ -319,7 +319,7 @@ pub fn buildTsan(comp: *Compilation, prog_node: std.Progress.Node) BuildError!vo
};
const crt_file = try sub_compilation.toCrtFile();
- comp.queuePrelinkTaskMode(crt_file.full_object_path, &config);
+ try comp.queuePrelinkTaskMode(crt_file.full_object_path, &config);
assert(comp.tsan_lib == null);
comp.tsan_lib = crt_file;
}
diff --git a/src/libs/libunwind.zig b/src/libs/libunwind.zig
index 26b8ae59b4..7ab079b205 100644
--- a/src/libs/libunwind.zig
+++ b/src/libs/libunwind.zig
@@ -12,7 +12,7 @@ pub const BuildError = error{
OutOfMemory,
AlreadyReported,
ZigCompilerNotBuiltWithLLVMExtensions,
-};
+} || std.Io.Cancelable;
pub fn buildStaticLib(comp: *Compilation, prog_node: std.Progress.Node) BuildError!void {
if (!build_options.have_llvm) {
@@ -145,6 +145,7 @@ pub fn buildStaticLib(comp: *Compilation, prog_node: std.Progress.Node) BuildErr
var sub_create_diag: Compilation.CreateDiagnostic = undefined;
const sub_compilation = Compilation.create(comp.gpa, arena, io, &sub_create_diag, .{
+ .thread_limit = comp.thread_limit,
.dirs = comp.dirs.withoutLocalCache(),
.self_exe_path = comp.self_exe_path,
.config = config,
@@ -152,7 +153,6 @@ pub fn buildStaticLib(comp: *Compilation, prog_node: std.Progress.Node) BuildErr
.cache_mode = .whole,
.root_name = root_name,
.main_mod = null,
- .thread_pool = comp.thread_pool,
.libc_installation = comp.libc_installation,
.emit_bin = .yes_cache,
.function_sections = comp.function_sections,
@@ -184,7 +184,7 @@ pub fn buildStaticLib(comp: *Compilation, prog_node: std.Progress.Node) BuildErr
};
const crt_file = try sub_compilation.toCrtFile();
- comp.queuePrelinkTaskMode(crt_file.full_object_path, &config);
+ try comp.queuePrelinkTaskMode(crt_file.full_object_path, &config);
assert(comp.libunwind_static_lib == null);
comp.libunwind_static_lib = crt_file;
}
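
The `BuildError` changes in these `libs/*.zig` files all follow one pattern: the existing error set is widened with `std.Io.Cancelable` so that `error.Canceled` can propagate out of the prelink-queueing helpers, which were previously infallible. A self-contained sketch of the pattern (the caller here is hypothetical):

const std = @import("std");

pub const BuildError = error{
    OutOfMemory,
    AlreadyReported,
} || std.Io.Cancelable;

fn queueArtifact(canceled: bool) BuildError!void {
    // Stands in for `try comp.queuePrelinkTaskMode(...)`, which can now
    // fail with error.Canceled like any other error.
    if (canceled) return error.Canceled;
}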
diff --git a/src/libs/mingw.zig b/src/libs/mingw.zig
index b706d3d457..b3c018996a 100644
--- a/src/libs/mingw.zig
+++ b/src/libs/mingw.zig
@@ -281,8 +281,8 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
const sub_path = try std.fs.path.join(gpa, &.{ "o", &digest, final_lib_basename });
errdefer gpa.free(sub_path);
- comp.mutex.lock();
- defer comp.mutex.unlock();
+ comp.mutex.lockUncancelable(io);
+ defer comp.mutex.unlock(io);
try comp.crt_files.ensureUnusedCapacity(gpa, 1);
comp.crt_files.putAssumeCapacityNoClobber(final_lib_basename, .{
.full_object_path = .{
@@ -388,8 +388,8 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
log.warn("failed to write cache manifest for DLL import {s}.lib: {s}", .{ lib_name, @errorName(err) });
};
- comp.mutex.lock();
- defer comp.mutex.unlock();
+ comp.mutex.lockUncancelable(io);
+ defer comp.mutex.unlock(io);
try comp.crt_files.putNoClobber(gpa, final_lib_basename, .{
.full_object_path = .{
.root_dir = comp.dirs.global_cache,
diff --git a/src/libs/musl.zig b/src/libs/musl.zig
index 69bd892b3b..1a1807f250 100644
--- a/src/libs/musl.zig
+++ b/src/libs/musl.zig
@@ -248,12 +248,12 @@ pub fn buildCrtFile(comp: *Compilation, in_crt_file: CrtFile, prog_node: std.Pro
var sub_create_diag: Compilation.CreateDiagnostic = undefined;
const sub_compilation = Compilation.create(comp.gpa, arena, io, &sub_create_diag, .{
+ .thread_limit = comp.thread_limit,
.dirs = comp.dirs.withoutLocalCache(),
.self_exe_path = comp.self_exe_path,
.cache_mode = .whole,
.config = config,
.root_mod = root_mod,
- .thread_pool = comp.thread_pool,
.root_name = "c",
.libc_installation = comp.libc_installation,
.emit_bin = .yes_cache,
@@ -287,10 +287,10 @@ pub fn buildCrtFile(comp: *Compilation, in_crt_file: CrtFile, prog_node: std.Pro
errdefer comp.gpa.free(basename);
const crt_file = try sub_compilation.toCrtFile();
- comp.queuePrelinkTaskMode(crt_file.full_object_path, &config);
+ try comp.queuePrelinkTaskMode(crt_file.full_object_path, &config);
{
- comp.mutex.lock();
- defer comp.mutex.unlock();
+ comp.mutex.lockUncancelable(io);
+ defer comp.mutex.unlock(io);
try comp.crt_files.ensureUnusedCapacity(comp.gpa, 1);
comp.crt_files.putAssumeCapacityNoClobber(basename, crt_file);
}
diff --git a/src/libs/netbsd.zig b/src/libs/netbsd.zig
index 07a7da7f6f..8d35e3bd71 100644
--- a/src/libs/netbsd.zig
+++ b/src/libs/netbsd.zig
@@ -645,7 +645,8 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
});
}
-fn queueSharedObjects(comp: *Compilation, so_files: BuiltSharedObjects) void {
+fn queueSharedObjects(comp: *Compilation, so_files: BuiltSharedObjects) std.Io.Cancelable!void {
+ const io = comp.io;
assert(comp.netbsd_so_files == null);
comp.netbsd_so_files = so_files;
@@ -653,8 +654,8 @@ fn queueSharedObjects(comp: *Compilation, so_files: BuiltSharedObjects) void {
var task_buffer_i: usize = 0;
{
- comp.mutex.lock(); // protect comp.arena
- defer comp.mutex.unlock();
+ comp.mutex.lockUncancelable(io); // protect comp.arena
+ defer comp.mutex.unlock(io);
for (libs) |lib| {
const so_path: Path = .{
@@ -668,7 +669,7 @@ fn queueSharedObjects(comp: *Compilation, so_files: BuiltSharedObjects) void {
}
}
- comp.queuePrelinkTasks(task_buffer[0..task_buffer_i]);
+ try comp.queuePrelinkTasks(task_buffer[0..task_buffer_i]);
}
fn buildSharedLib(
@@ -737,8 +738,8 @@ fn buildSharedLib(
var sub_create_diag: Compilation.CreateDiagnostic = undefined;
const sub_compilation = Compilation.create(comp.gpa, arena, io, &sub_create_diag, .{
+ .thread_limit = comp.thread_limit,
.dirs = comp.dirs.withoutLocalCache(),
- .thread_pool = comp.thread_pool,
.self_exe_path = comp.self_exe_path,
// Because we manually cache the whole set of objects, we don't cache the individual objects
// within it. In fact, we *can't* do that, because we need `emit_bin` to specify the path.
diff --git a/src/link.zig b/src/link.zig
index 6be88490b0..6ac96504c7 100644
--- a/src/link.zig
+++ b/src/link.zig
@@ -34,7 +34,8 @@ pub const Diags = struct {
/// Stored here so that function definitions can distinguish whether they need
/// an allocator for anything besides error reporting.
gpa: Allocator,
- mutex: std.Thread.Mutex,
+ io: Io,
+ mutex: Io.Mutex,
msgs: std.ArrayList(Msg),
flags: Flags,
lld: std.ArrayList(Lld),
@@ -126,10 +127,11 @@ pub const Diags = struct {
}
};
- pub fn init(gpa: Allocator) Diags {
+ pub fn init(gpa: Allocator, io: Io) Diags {
return .{
.gpa = gpa,
- .mutex = .{},
+ .io = io,
+ .mutex = .init,
.msgs = .empty,
.flags = .{},
.lld = .empty,
@@ -153,8 +155,10 @@ pub const Diags = struct {
}
pub fn lockAndParseLldStderr(diags: *Diags, prefix: []const u8, stderr: []const u8) void {
- diags.mutex.lock();
- defer diags.mutex.unlock();
+ const io = diags.io;
+
+ diags.mutex.lockUncancelable(io);
+ defer diags.mutex.unlock(io);
diags.parseLldStderr(prefix, stderr) catch diags.setAllocFailure();
}
@@ -226,9 +230,10 @@ pub const Diags = struct {
pub fn addErrorSourceLocation(diags: *Diags, sl: SourceLocation, comptime format: []const u8, args: anytype) void {
@branchHint(.cold);
const gpa = diags.gpa;
+ const io = diags.io;
const eu_main_msg = std.fmt.allocPrint(gpa, format, args);
- diags.mutex.lock();
- defer diags.mutex.unlock();
+ diags.mutex.lockUncancelable(io);
+ defer diags.mutex.unlock(io);
addErrorLockedFallible(diags, sl, eu_main_msg) catch |err| switch (err) {
error.OutOfMemory => diags.setAllocFailureLocked(),
};
@@ -247,8 +252,9 @@ pub const Diags = struct {
pub fn addErrorWithNotes(diags: *Diags, note_count: usize) error{OutOfMemory}!ErrorWithNotes {
@branchHint(.cold);
const gpa = diags.gpa;
- diags.mutex.lock();
- defer diags.mutex.unlock();
+ const io = diags.io;
+ diags.mutex.lockUncancelable(io);
+ defer diags.mutex.unlock(io);
try diags.msgs.ensureUnusedCapacity(gpa, 1);
return addErrorWithNotesAssumeCapacity(diags, note_count);
}
@@ -276,9 +282,10 @@ pub const Diags = struct {
) void {
@branchHint(.cold);
const gpa = diags.gpa;
+ const io = diags.io;
const eu_main_msg = std.fmt.allocPrint(gpa, format, args);
- diags.mutex.lock();
- defer diags.mutex.unlock();
+ diags.mutex.lockUncancelable(io);
+ defer diags.mutex.unlock(io);
addMissingLibraryErrorLockedFallible(diags, checked_paths, eu_main_msg) catch |err| switch (err) {
error.OutOfMemory => diags.setAllocFailureLocked(),
};
@@ -312,9 +319,10 @@ pub const Diags = struct {
) void {
@branchHint(.cold);
const gpa = diags.gpa;
+ const io = diags.io;
const eu_main_msg = std.fmt.allocPrint(gpa, format, args);
- diags.mutex.lock();
- defer diags.mutex.unlock();
+ diags.mutex.lockUncancelable(io);
+ defer diags.mutex.unlock(io);
addParseErrorLockedFallible(diags, path, eu_main_msg) catch |err| switch (err) {
error.OutOfMemory => diags.setAllocFailureLocked(),
};
@@ -349,8 +357,9 @@ pub const Diags = struct {
pub fn setAllocFailure(diags: *Diags) void {
@branchHint(.cold);
- diags.mutex.lock();
- defer diags.mutex.unlock();
+ const io = diags.io;
+ diags.mutex.lockUncancelable(io);
+ defer diags.mutex.unlock(io);
setAllocFailureLocked(diags);
}
@@ -1101,6 +1110,7 @@ pub const File = struct {
const comp = base.comp;
const diags = &comp.link_diags;
const gpa = comp.gpa;
+ const io = comp.io;
const stat = try file.stat();
const size = std.math.cast(u32, stat.size) orelse return error.FileTooBig;
const buf = try gpa.alloc(u8, size);
@@ -1123,8 +1133,8 @@ pub const File = struct {
} else {
if (fs.path.isAbsolute(arg.path)) {
const new_path = Path.initCwd(path: {
- comp.mutex.lock();
- defer comp.mutex.unlock();
+ comp.mutex.lockUncancelable(io);
+ defer comp.mutex.unlock(io);
break :path try comp.arena.dupe(u8, arg.path);
});
switch (Compilation.classifyFileExt(arg.path)) {
@@ -1309,61 +1319,13 @@ pub const ZcuTask = union(enum) {
/// Write the constant value for a Decl to the output file.
link_nav: InternPool.Nav.Index,
/// Write the machine code for a function to the output file.
- link_func: LinkFunc,
+ link_func: Zcu.CodegenTaskPool.Index,
link_type: InternPool.Index,
update_line_number: InternPool.TrackedInst.Index,
- pub fn deinit(task: ZcuTask, zcu: *const Zcu) void {
- switch (task) {
- .link_nav,
- .link_type,
- .update_line_number,
- => {},
- .link_func => |link_func| {
- switch (link_func.mir.status.load(.acquire)) {
- .pending => unreachable, // cannot deinit until MIR done
- .failed => {}, // MIR not populated so doesn't need freeing
- .ready => link_func.mir.value.deinit(zcu),
- }
- zcu.gpa.destroy(link_func.mir);
- },
- }
- }
- pub const LinkFunc = struct {
- /// This will either be a non-generic `func_decl` or a `func_instance`.
- func: InternPool.Index,
- /// This pointer is allocated into `gpa` and must be freed when the `ZcuTask` is processed.
- /// The pointer is shared with the codegen worker, which will populate the MIR inside once
- /// it has been generated. It's important that the `link_func` is queued at the same time as
- /// the codegen job to ensure that the linker receives functions in a deterministic order,
- /// allowing reproducible builds.
- mir: *SharedMir,
- /// This is not actually used by `doZcuTask`. Instead, `Queue` uses this value as a heuristic
- /// to avoid queueing too much AIR/MIR for codegen/link at a time. Essentially, we cap the
- /// total number of AIR bytes which are being processed at once, preventing unbounded memory
- /// usage when AIR is produced faster than it is processed.
- air_bytes: u32,
-
- pub const SharedMir = struct {
- /// This is initially `.pending`. When `value` is populated, the codegen thread will set
- /// this to `.ready`, and alert the queue if needed. It could also end up `.failed`.
- /// The action of storing a value (other than `.pending`) to this atomic transfers
- /// ownership of memory associated with `value` to this `ZcuTask`.
- status: std.atomic.Value(enum(u8) {
- /// We are waiting on codegen to generate MIR (or die trying).
- pending,
- /// `value` is not populated and will not be populated. Just drop the task from the queue and move on.
- failed,
- /// `value` is populated with the MIR from the backend in use, which is not LLVM.
- ready,
- }),
- /// This is `undefined` until `status` is set to `.ready`. Once populated, this MIR belongs
- /// to the `ZcuTask`, and must be `deinit`ed when it is processed. Allocated into `gpa`.
- value: codegen.AnyMir,
- };
- };
};
pub fn doPrelinkTask(comp: *Compilation, task: PrelinkTask) void {
+ const io = comp.io;
const diags = &comp.link_diags;
const base = comp.bin_file orelse {
comp.link_prog_node.completeOne();
@@ -1372,8 +1334,8 @@ pub fn doPrelinkTask(comp: *Compilation, task: PrelinkTask) void {
var timer = comp.startTimer();
defer if (timer.finish()) |ns| {
- comp.mutex.lock();
- defer comp.mutex.unlock();
+ comp.mutex.lockUncancelable(io);
+ defer comp.mutex.unlock(io);
comp.time_report.?.stats.cpu_ns_link += ns;
};
@@ -1484,6 +1446,7 @@ pub fn doPrelinkTask(comp: *Compilation, task: PrelinkTask) void {
}
}
pub fn doZcuTask(comp: *Compilation, tid: usize, task: ZcuTask) void {
+ const io = comp.io;
const diags = &comp.link_diags;
const zcu = comp.zcu.?;
const ip = &zcu.intern_pool;
@@ -1492,8 +1455,8 @@ pub fn doZcuTask(comp: *Compilation, tid: usize, task: ZcuTask) void {
var timer = comp.startTimer();
- switch (task) {
- .link_nav => |nav_index| {
+ const maybe_nav: ?InternPool.Nav.Index = switch (task) {
+ .link_nav => |nav_index| nav: {
const fqn_slice = ip.getNav(nav_index).fqn.toSlice(ip);
const nav_prog_node = comp.link_prog_node.start(fqn_slice, 0);
defer nav_prog_node.end();
@@ -1514,21 +1477,25 @@ pub fn doZcuTask(comp: *Compilation, tid: usize, task: ZcuTask) void {
},
};
}
+ break :nav nav_index;
},
- .link_func => |func| {
- const nav = zcu.funcInfo(func.func).owner_nav;
+ .link_func => |codegen_task| nav: {
+ timer.pause();
+ const func, var mir = codegen_task.wait(&zcu.codegen_task_pool, io) catch |err| switch (err) {
+ error.Canceled, error.AlreadyReported => return,
+ };
+ defer mir.deinit(zcu);
+ timer.@"resume"();
+
+ const nav = zcu.funcInfo(func).owner_nav;
const fqn_slice = ip.getNav(nav).fqn.toSlice(ip);
+
const nav_prog_node = comp.link_prog_node.start(fqn_slice, 0);
defer nav_prog_node.end();
- switch (func.mir.status.load(.acquire)) {
- .pending => unreachable,
- .ready => {},
- .failed => return,
- }
+
assert(zcu.llvm_object == null); // LLVM codegen doesn't produce MIR
- const mir = &func.mir.value;
if (comp.bin_file) |lf| {
- lf.updateFunc(pt, func.func, mir) catch |err| switch (err) {
+ lf.updateFunc(pt, func, &mir) catch |err| switch (err) {
error.OutOfMemory => return diags.setAllocFailure(),
error.CodegenFail => return zcu.assertCodegenFailed(nav),
error.Overflow, error.RelocationNotByteAligned => {
@@ -1539,8 +1506,9 @@ pub fn doZcuTask(comp: *Compilation, tid: usize, task: ZcuTask) void {
},
};
}
+ break :nav ip.indexToKey(func).func.owner_nav;
},
- .link_type => |ty| {
+ .link_type => |ty| nav: {
const name = Type.fromInterned(ty).containerTypeName(ip).toSlice(ip);
const nav_prog_node = comp.link_prog_node.start(name, 0);
defer nav_prog_node.end();
@@ -1552,8 +1520,9 @@ pub fn doZcuTask(comp: *Compilation, tid: usize, task: ZcuTask) void {
};
}
}
+ break :nav null;
},
- .update_line_number => |ti| {
+ .update_line_number => |ti| nav: {
const nav_prog_node = comp.link_prog_node.start("Update line number", 0);
defer nav_prog_node.end();
if (pt.zcu.llvm_object == null) {
@@ -1564,21 +1533,18 @@ pub fn doZcuTask(comp: *Compilation, tid: usize, task: ZcuTask) void {
};
}
}
+ break :nav null;
},
- }
+ };
if (timer.finish()) |ns_link| report_time: {
- const zir_decl: ?InternPool.TrackedInst.Index = switch (task) {
- .link_type, .update_line_number => null,
- .link_nav => |nav| ip.getNav(nav).srcInst(ip),
- .link_func => |f| ip.getNav(ip.indexToKey(f.func).func.owner_nav).srcInst(ip),
- };
- comp.mutex.lock();
- defer comp.mutex.unlock();
+ comp.mutex.lockUncancelable(io);
+ defer comp.mutex.unlock(io);
const tr = &zcu.comp.time_report.?;
tr.stats.cpu_ns_link += ns_link;
- if (zir_decl) |inst| {
- const gop = tr.decl_link_ns.getOrPut(zcu.gpa, inst) catch |err| switch (err) {
+ if (maybe_nav) |nav| {
+ const zir_decl = ip.getNav(nav).srcInst(ip);
+ const gop = tr.decl_link_ns.getOrPut(zcu.gpa, zir_decl) catch |err| switch (err) {
error.OutOfMemory => {
zcu.comp.setAllocFailure();
break :report_time;
@@ -2208,8 +2174,13 @@ fn resolvePathInputLib(
const n2 = file.preadAll(buf2, n) catch |err|
fatal("failed to read {f}: {s}", .{ test_path, @errorName(err) });
if (n2 != buf2.len) fatal("failed to read {f}: unexpected end of file", .{test_path});
- var diags = Diags.init(gpa);
+
+ // This `Io` is only used for a mutex, and we know we aren't doing anything async/concurrent.
+ var threaded: Io.Threaded = .init_single_threaded;
+ defer threaded.deinit();
+ var diags: Diags = .init(gpa, threaded.io());
defer diags.deinit();
+
const ld_script_result = LdScript.parse(gpa, &diags, test_path, ld_script_bytes.items);
if (diags.hasErrors()) {
var wip_errors: std.zig.ErrorBundle.Wip = undefined;
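
The `Io.Threaded.init_single_threaded` idiom above is worth calling out: `Diags` now needs an `Io` only to back its mutex, so call sites with no concurrency can hand it a trivial single-threaded implementation. A standalone sketch of the same idiom (`standaloneDiags` is a hypothetical name; `Diags` is this file's type):

fn standaloneDiags(gpa: std.mem.Allocator) void {
    // No async or concurrent work happens here, so a single-threaded `Io`
    // is sufficient to satisfy `Diags.mutex`.
    var threaded: std.Io.Threaded = .init_single_threaded;
    defer threaded.deinit();
    var diags: Diags = .init(gpa, threaded.io());
    defer diags.deinit();
    // ... call LdScript.parse(gpa, &diags, ...) and check diags.hasErrors() ...
}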
diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index fa41c6b1de..69acbe034b 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -713,6 +713,7 @@ pub fn allocateChunk(self: *Elf, args: struct {
pub fn loadInput(self: *Elf, input: link.Input) !void {
const comp = self.base.comp;
const gpa = comp.gpa;
+ const io = comp.io;
const diags = &comp.link_diags;
const target = self.getTarget();
const debug_fmt_strip = comp.config.debug_format == .strip;
@@ -720,8 +721,8 @@ pub fn loadInput(self: *Elf, input: link.Input) !void {
const is_static_lib = self.base.isStaticLib();
if (comp.verbose_link) {
- comp.mutex.lock(); // protect comp.arena
- defer comp.mutex.unlock();
+ comp.mutex.lockUncancelable(io); // protect comp.arena
+ defer comp.mutex.unlock(io);
const argv = &self.dump_argv_list;
switch (input) {
diff --git a/src/link/MachO.zig b/src/link/MachO.zig
index 035bd60155..2c4ffd6632 100644
--- a/src/link/MachO.zig
+++ b/src/link/MachO.zig
@@ -29,9 +29,9 @@ resolver: SymbolResolver = .{},
/// This table will be populated after `scanRelocs` has run.
/// Key is symbol index.
undefs: std.AutoArrayHashMapUnmanaged(SymbolResolver.Index, UndefRefs) = .empty,
-undefs_mutex: std.Thread.Mutex = .{},
+undefs_mutex: std.Io.Mutex = .init,
dupes: std.AutoArrayHashMapUnmanaged(SymbolResolver.Index, std.ArrayList(File.Index)) = .empty,
-dupes_mutex: std.Thread.Mutex = .{},
+dupes_mutex: std.Io.Mutex = .init,
dyld_info_cmd: macho.dyld_info_command = .{},
symtab_cmd: macho.symtab_command = .{},
diff --git a/src/link/MachO/Atom.zig b/src/link/MachO/Atom.zig
index a8c1ed843e..8f0f801949 100644
--- a/src/link/MachO/Atom.zig
+++ b/src/link/MachO/Atom.zig
@@ -555,9 +555,10 @@ fn reportUndefSymbol(self: Atom, rel: Relocation, macho_file: *MachO) !bool {
const file = self.getFile(macho_file);
const ref = file.getSymbolRef(rel.target, macho_file);
if (ref.getFile(macho_file) == null) {
- macho_file.undefs_mutex.lock();
- defer macho_file.undefs_mutex.unlock();
const gpa = macho_file.base.comp.gpa;
+ const io = macho_file.base.comp.io;
+ macho_file.undefs_mutex.lockUncancelable(io);
+ defer macho_file.undefs_mutex.unlock(io);
const gop = try macho_file.undefs.getOrPut(gpa, file.getGlobals()[rel.target]);
if (!gop.found_existing) {
gop.value_ptr.* = .{ .refs = .{} };
diff --git a/src/link/MachO/CodeSignature.zig b/src/link/MachO/CodeSignature.zig
index 0390cc2fe4..5bded3b9e3 100644
--- a/src/link/MachO/CodeSignature.zig
+++ b/src/link/MachO/CodeSignature.zig
@@ -289,7 +289,7 @@ pub fn writeAdhocSignature(
self.code_directory.inner.nCodeSlots = total_pages;
// Calculate hash for each page (in file) and write it to the buffer
- var hasher = Hasher(Sha256){ .allocator = allocator, .thread_pool = macho_file.base.comp.thread_pool };
+ var hasher = Hasher(Sha256){ .allocator = allocator, .io = macho_file.base.comp.io };
try hasher.hash(opts.file, self.code_directory.code_slots.items, .{
.chunk_size = self.page_size,
.max_file_size = opts.file_size,
diff --git a/src/link/MachO/InternalObject.zig b/src/link/MachO/InternalObject.zig
index 802381207e..fb4571d451 100644
--- a/src/link/MachO/InternalObject.zig
+++ b/src/link/MachO/InternalObject.zig
@@ -512,8 +512,9 @@ pub fn checkUndefs(self: InternalObject, macho_file: *MachO) !void {
const addUndef = struct {
fn addUndef(mf: *MachO, index: MachO.SymbolResolver.Index, tag: anytype) !void {
const gpa = mf.base.comp.gpa;
- mf.undefs_mutex.lock();
- defer mf.undefs_mutex.unlock();
+ const io = mf.base.comp.io;
+ mf.undefs_mutex.lockUncancelable(io);
+ defer mf.undefs_mutex.unlock(io);
const gop = try mf.undefs.getOrPut(gpa, index);
if (!gop.found_existing) {
gop.value_ptr.* = tag;
diff --git a/src/link/MachO/file.zig b/src/link/MachO/file.zig
index ca3e1c0e82..05b43de181 100644
--- a/src/link/MachO/file.zig
+++ b/src/link/MachO/file.zig
@@ -242,6 +242,7 @@ pub const File = union(enum) {
const tracy = trace(@src());
defer tracy.end();
+ const io = macho_file.base.comp.io;
const gpa = macho_file.base.comp.gpa;
for (file.getSymbols(), file.getNlists(), 0..) |sym, nlist, i| {
@@ -252,8 +253,8 @@ pub const File = union(enum) {
const ref_file = ref.getFile(macho_file) orelse continue;
if (ref_file.getIndex() == file.getIndex()) continue;
- macho_file.dupes_mutex.lock();
- defer macho_file.dupes_mutex.unlock();
+ macho_file.dupes_mutex.lockUncancelable(io);
+ defer macho_file.dupes_mutex.unlock(io);
const gop = try macho_file.dupes.getOrPut(gpa, file.getGlobals()[i]);
if (!gop.found_existing) {
diff --git a/src/link/MachO/hasher.zig b/src/link/MachO/hasher.zig
index f10a2fe8cf..78cd847c40 100644
--- a/src/link/MachO/hasher.zig
+++ b/src/link/MachO/hasher.zig
@@ -3,7 +3,7 @@ pub fn ParallelHasher(comptime Hasher: type) type {
return struct {
allocator: Allocator,
- thread_pool: *ThreadPool,
+ io: std.Io,
pub fn hash(self: Self, file: fs.File, out: [][hash_size]u8, opts: struct {
chunk_size: u64 = 0x4000,
@@ -12,7 +12,7 @@ pub fn ParallelHasher(comptime Hasher: type) type {
const tracy = trace(@src());
defer tracy.end();
- var wg: WaitGroup = .{};
+ const io = self.io;
const file_size = blk: {
const file_size = opts.max_file_size orelse try file.getEndPos();
@@ -27,8 +27,8 @@ pub fn ParallelHasher(comptime Hasher: type) type {
defer self.allocator.free(results);
{
- wg.reset();
- defer wg.wait();
+ var group: std.Io.Group = .init;
+ errdefer group.cancel(io);
for (out, results, 0..) |*out_buf, *result, i| {
const fstart = i * chunk_size;
@@ -36,7 +36,7 @@ pub fn ParallelHasher(comptime Hasher: type) type {
file_size - fstart
else
chunk_size;
- self.thread_pool.spawnWg(&wg, worker, .{
+ group.async(io, worker, .{
file,
fstart,
buffer[fstart..][0..fsize],
@@ -44,6 +44,8 @@ pub fn ParallelHasher(comptime Hasher: type) type {
&(result.*),
});
}
+
+ group.wait(io);
}
for (results) |result| _ = try result;
}
@@ -72,5 +74,3 @@ const std = @import("std");
const trace = @import("../../tracy.zig").trace;
const Allocator = mem.Allocator;
-const ThreadPool = std.Thread.Pool;
-const WaitGroup = std.Thread.WaitGroup;
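
The hasher migration shows the general replacement for `WaitGroup` plus `ThreadPool.spawnWg`: spawn tasks into a `std.Io.Group`, then `wait` on the group. A reduced sketch of that fan-out/fan-in shape (the `Wyhash` worker is a stand-in for the real chunk hasher):

const std = @import("std");

fn hashChunks(io: std.Io, chunks: []const []const u8, out: []u64) void {
    var group: std.Io.Group = .init;
    for (chunks, out) |chunk, *result| {
        // Runs concurrently when the `Io` implementation supports it,
        // otherwise inline on the calling task.
        group.async(io, worker, .{ chunk, result });
    }
    group.wait(io); // returns once every spawned task has finished
}

fn worker(chunk: []const u8, result: *u64) void {
    result.* = std.hash.Wyhash.hash(0, chunk);
}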
diff --git a/src/link/MachO/relocatable.zig b/src/link/MachO/relocatable.zig
index 09807a2845..d2a6c2a3ab 100644
--- a/src/link/MachO/relocatable.zig
+++ b/src/link/MachO/relocatable.zig
@@ -773,7 +773,6 @@ fn writeHeader(macho_file: *MachO, ncmds: usize, sizeofcmds: usize) !void {
const std = @import("std");
const Path = std.Build.Cache.Path;
-const WaitGroup = std.Thread.WaitGroup;
const assert = std.debug.assert;
const log = std.log.scoped(.link);
const macho = std.macho;
diff --git a/src/link/MachO/uuid.zig b/src/link/MachO/uuid.zig
index 565ae80b22..d08ac0c5b8 100644
--- a/src/link/MachO/uuid.zig
+++ b/src/link/MachO/uuid.zig
@@ -15,7 +15,7 @@ pub fn calcUuid(comp: *const Compilation, file: fs.File, file_size: u64, out: *[
const hashes = try comp.gpa.alloc([Md5.digest_length]u8, actual_num_chunks);
defer comp.gpa.free(hashes);
- var hasher = Hasher(Md5){ .allocator = comp.gpa, .thread_pool = comp.thread_pool };
+ var hasher = Hasher(Md5){ .allocator = comp.gpa, .io = comp.io };
try hasher.hash(file, hashes, .{
.chunk_size = chunk_size,
.max_file_size = file_size,
@@ -46,4 +46,3 @@ const trace = @import("../../tracy.zig").trace;
const Compilation = @import("../../Compilation.zig");
const Md5 = std.crypto.hash.Md5;
const Hasher = @import("hasher.zig").ParallelHasher;
-const ThreadPool = std.Thread.Pool;
diff --git a/src/link/Queue.zig b/src/link/Queue.zig
index 742b4664f1..e8e7700695 100644
--- a/src/link/Queue.zig
+++ b/src/link/Queue.zig
@@ -1,254 +1,171 @@
//! Stores and manages the queue of link tasks. Each task is either a `PrelinkTask` or a `ZcuTask`.
//!
-//! There must be at most one link thread (the thread processing these tasks) active at a time. If
-//! `!comp.separateCodegenThreadOk()`, then ZCU tasks will be run on the main thread, bypassing this
-//! queue entirely.
+//! There are two `std.Io.Queue`s, for prelink and ZCU tasks respectively. The compiler writes tasks
+//! to these queues, and a single concurrent linker task receives and processes them. `Compilation`
+//! is responsible for calling `finishPrelinkQueue` and `finishZcuQueue` once all relevant tasks
+//! have been queued. All prelink tasks must be queued and completed before any ZCU tasks can be
+//! processed.
//!
-//! All prelink tasks must be processed before any ZCU tasks are processed. After all prelink tasks
-//! are run, but before any ZCU tasks are run, `prelink` must be called on the `link.File`.
+//! If concurrency is unavailable, the `enqueuePrelink` and `enqueueZcu` functions will instead run
+//! the given tasks immediately---the queues are unused.
//!
-//! There will sometimes be a `ZcuTask` in the queue which is not yet ready because it depends on
-//! MIR which has not yet been generated by any codegen thread. In this case, we must pause
-//! processing of linker tasks until the MIR is ready. It would be incorrect to run any other link
-//! tasks first, since this would make builds unreproducible.
+//! If the codegen backend does not permit concurrency, then `Compilation` will call `finishZcuQueue`
+//! early so that the concurrent linker task exits after prelink, and ZCU tasks will run
+//! non-concurrently in `enqueueZcu`.
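+//!
+//! Typical lifecycle (a sketch; `Compilation` drives these calls): `start` attempts to spawn the
+//! concurrent `runLinkTasks`; `enqueuePrelink` is called as needed, then `finishPrelinkQueue`;
+//! `enqueueZcu` is called as needed, then `finishZcuQueue`; finally, `wait` or `cancel` joins the
+//! linker task.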
-mutex: std.Thread.Mutex,
-/// Validates that only one `flushTaskQueue` thread is running at a time.
-flush_safety: std.debug.SafetyLock,
+/// This is the concurrent call to `runLinkTasks`. It may be set to non-`null` in `start`, and is
+/// set back to `null` by the main thread once it has been awaited or canceled. It is not otherwise
+/// modified; as such, it may be checked non-atomically. If a task is being queued and this is
+/// `null`, tasks must be run eagerly.
+future: ?std.Io.Future(void),
-/// This value is positive while there are still prelink tasks yet to be queued. Once they are
-/// all queued, this value becomes 0, and ZCU tasks can be run. Guarded by `mutex`.
-prelink_wait_count: u32,
+/// This is only used if `future == null` during prelink, in which case it ensures that only one
+/// prelink task runs at a time.
+prelink_mutex: std.Io.Mutex,
-/// Prelink tasks which have been enqueued and are not yet owned by the worker thread.
-/// Allocated into `gpa`, guarded by `mutex`.
-queued_prelink: std.ArrayList(PrelinkTask),
-/// The worker thread moves items from `queued_prelink` into this array in order to process them.
-/// Allocated into `gpa`, accessed only by the worker thread.
-wip_prelink: std.ArrayList(PrelinkTask),
+/// Only valid if `future != null`.
+prelink_queue: std.Io.Queue(PrelinkTask),
+/// Only valid if `future != null`.
+zcu_queue: std.Io.Queue(ZcuTask),
-/// Like `queued_prelink`, but for ZCU tasks.
-/// Allocated into `gpa`, guarded by `mutex`.
-queued_zcu: std.ArrayList(ZcuTask),
-/// Like `wip_prelink`, but for ZCU tasks.
-/// Allocated into `gpa`, accessed only by the worker thread.
-wip_zcu: std.ArrayList(ZcuTask),
-
-/// When processing ZCU link tasks, we might have to block due to unpopulated MIR. When this
-/// happens, some tasks in `wip_zcu` have been run, and some are still pending. This is the
-/// index into `wip_zcu` which we have reached.
-wip_zcu_idx: usize,
-
-/// The sum of all `air_bytes` for all currently-queued `ZcuTask.link_func` tasks. Because
-/// MIR bytes are approximately proportional to AIR bytes, this acts to limit the amount of
-/// AIR and MIR which is queued for codegen and link respectively, to prevent excessive
-/// memory usage if analysis produces AIR faster than it can be processed by codegen/link.
-/// The cap is `max_air_bytes_in_flight`.
-/// Guarded by `mutex`.
-air_bytes_in_flight: u32,
-/// If nonzero, then a call to `enqueueZcu` is blocked waiting to add a `link_func` task, but
-/// cannot until `air_bytes_in_flight` is no greater than this value.
-/// Guarded by `mutex`.
-air_bytes_waiting: u32,
-/// After setting `air_bytes_waiting`, `enqueueZcu` will wait on this condition (with `mutex`).
-/// When `air_bytes_waiting` many bytes can be queued, this condition should be signaled.
-air_bytes_cond: std.Thread.Condition,
-
-/// Guarded by `mutex`.
-state: union(enum) {
- /// The link thread is currently running or queued to run.
- running,
- /// The link thread is not running or queued, because it has exhausted all immediately available
- /// tasks. It should be spawned when more tasks are enqueued. If `prelink_wait_count` is not
- /// zero, we are specifically waiting for prelink tasks.
- finished,
- /// The link thread is not running or queued, because it is waiting for this MIR to be populated.
- /// Once codegen completes, it must call `mirReady` which will restart the link thread.
- wait_for_mir: InternPool.Index,
-},
-
-/// In the worst observed case, MIR is around 50 times as large as AIR. More typically, the ratio is
-/// around 20. Going by that 50x multiplier, and assuming we want to consume no more than 500 MiB of
-/// memory on AIR/MIR, we see a limit of around 10 MiB of AIR in-flight.
-const max_air_bytes_in_flight = 10 * 1024 * 1024;
+/// The capacity of the task queue buffers.
+pub const buffer_size = 512;
-/// The initial `Queue` state, containing no tasks, expecting no prelink tasks, and with no running worker thread.
-/// The `queued_prelink` field may be appended to before calling `start`.
+/// The initial `Queue` state: no tasks, and no concurrent linker task running.
pub const empty: Queue = .{
- .mutex = .{},
- .flush_safety = .{},
- .prelink_wait_count = undefined, // set in `start`
- .queued_prelink = .empty,
- .wip_prelink = .empty,
- .queued_zcu = .empty,
- .wip_zcu = .empty,
- .wip_zcu_idx = 0,
- .state = .finished,
- .air_bytes_in_flight = 0,
- .air_bytes_waiting = 0,
- .air_bytes_cond = .{},
+ .future = null,
+ .prelink_mutex = .init,
+ .prelink_queue = undefined, // set in `start` if needed
+ .zcu_queue = undefined, // set in `start` if needed
};
-/// `lf` is needed to correctly deinit any pending `ZcuTask`s.
-pub fn deinit(q: *Queue, comp: *Compilation) void {
- const gpa = comp.gpa;
- for (q.queued_zcu.items) |t| t.deinit(comp.zcu.?);
- for (q.wip_zcu.items[q.wip_zcu_idx..]) |t| t.deinit(comp.zcu.?);
- q.queued_prelink.deinit(gpa);
- q.wip_prelink.deinit(gpa);
- q.queued_zcu.deinit(gpa);
- q.wip_zcu.deinit(gpa);
+
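+/// Cancels the concurrent linker task, if it is running.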
+pub fn cancel(q: *Queue, io: Io) void {
+ if (q.future) |*f| {
+ f.cancel(io);
+ q.future = null;
+ }
+}
+
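+/// Awaits completion of the concurrent linker task, if it is running.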
+pub fn wait(q: *Queue, io: Io) void {
+ if (q.future) |*f| {
+ f.await(io);
+ q.future = null;
+ }
}
-/// This is expected to be called exactly once, after which the caller must not directly access
-/// `queued_prelink` any longer. This will spawn the link thread if necessary.
+/// Asserts that the concurrent linker task is not already running, and attempts to spawn it. If
+/// concurrency is unavailable, queued tasks will instead run eagerly.
-pub fn start(q: *Queue, comp: *Compilation) void {
- assert(q.state == .finished);
- assert(q.queued_zcu.items.len == 0);
- // Reset this to 1. We can't init it to 1 in `empty`, because it would fall to 0 on successive
- // incremental updates, but we still need the initial 1.
- q.prelink_wait_count = 1;
- if (q.queued_prelink.items.len != 0) {
- q.state = .running;
- comp.thread_pool.spawnWgId(&comp.link_task_wait_group, flushTaskQueue, .{ q, comp });
+pub fn start(
+ q: *Queue,
+ comp: *Compilation,
+ arena: Allocator,
+) Allocator.Error!void {
+ assert(q.future == null);
+ q.prelink_queue = .init(try arena.alloc(PrelinkTask, buffer_size));
+ q.zcu_queue = .init(try arena.alloc(ZcuTask, buffer_size));
+ if (comp.io.concurrent(runLinkTasks, .{ q, comp })) |future| {
+ // We will run link tasks concurrently.
+ q.future = future;
+ } else |err| switch (err) {
+ error.ConcurrencyUnavailable => {
+ // We will run link tasks on the main thread.
+ q.prelink_queue = undefined;
+ q.zcu_queue = undefined;
+ },
}
}
-/// Every call to this must be paired with a call to `finishPrelinkItem`.
-pub fn startPrelinkItem(q: *Queue) void {
- q.mutex.lock();
- defer q.mutex.unlock();
- assert(q.prelink_wait_count > 0); // must not have finished everything already
- q.prelink_wait_count += 1;
-}
-/// This function must be called exactly one more time than `startPrelinkItem` is. The final call
-/// indicates that we have finished calling `startPrelinkItem`, so once all pending items finish,
-/// we are ready to move on to ZCU tasks.
-pub fn finishPrelinkItem(q: *Queue, comp: *Compilation) void {
- {
- q.mutex.lock();
- defer q.mutex.unlock();
- q.prelink_wait_count -= 1;
- if (q.prelink_wait_count != 0) return;
- // The prelink task count dropped to 0; restart the linker thread if necessary.
- switch (q.state) {
- .wait_for_mir => unreachable, // we've not started zcu tasks yet
- .running => return,
- .finished => {},
- }
- assert(q.queued_prelink.items.len == 0);
- // Even if there are no ZCU tasks, we must restart the linker thread to make sure
- // that `link.File.prelink()` is called.
- q.state = .running;
+/// Enqueues all prelink tasks in `tasks`. Asserts that they were expected, i.e. that
+/// the queue is not yet closed. Also asserts that `tasks.len` is not 0.
+pub fn enqueuePrelink(q: *Queue, comp: *Compilation, tasks: []const PrelinkTask) Io.Cancelable!void {
+ const io = comp.io;
+
+ if (q.future != null) {
+ q.prelink_queue.putAll(io, tasks) catch |err| switch (err) {
+ error.Canceled => |e| return e,
+ error.Closed => unreachable,
+ };
+ } else {
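+ // No concurrent linker task: run the tasks now, serialized by `prelink_mutex`.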
+ try q.prelink_mutex.lock(io);
+ defer q.prelink_mutex.unlock(io);
+ for (tasks) |task| link.doPrelinkTask(comp, task);
}
- comp.thread_pool.spawnWgId(&comp.link_task_wait_group, flushTaskQueue, .{ q, comp });
}
-/// Called by codegen workers after they have populated a `ZcuTask.LinkFunc.SharedMir`. If the link
-/// thread was waiting for this MIR, it can resume.
-pub fn mirReady(q: *Queue, comp: *Compilation, func_index: InternPool.Index, mir: *ZcuTask.LinkFunc.SharedMir) void {
- // We would like to assert that `mir` is not pending, but that would race with a worker thread
- // potentially freeing it.
- {
- q.mutex.lock();
- defer q.mutex.unlock();
- switch (q.state) {
- .finished, .running => return,
- .wait_for_mir => |wait_for| if (wait_for != func_index) return,
+pub fn enqueueZcu(
+ q: *Queue,
+ comp: *Compilation,
+ tid: usize,
+ task: ZcuTask,
+) Io.Cancelable!void {
+ const io = comp.io;
+
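+ // ZCU link tasks are only enqueued from the main thread; `tid` is forwarded to `doZcuTask` below.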
+ assert(tid == 0);
+
+ if (q.future != null) {
+ if (q.zcu_queue.putOne(io, task)) |_| {
+ return;
+ } else |err| switch (err) {
+ error.Canceled => |e| return e,
+ error.Closed => {
+ // The linker is still processing prelink tasks. Wait for those
+ // to finish, after which the linker task will have exited, and ZCU
+ // tasks will be run non-concurrently. This logic exists for
+ // backends which do not support `Zcu.Feature.separate_thread`.
+ q.wait(io);
+ },
}
- // We were waiting for `mir`, so we will restart the linker thread.
- q.state = .running;
}
- assert(mir.status.load(.acquire) != .pending);
- comp.thread_pool.spawnWgId(&comp.link_task_wait_group, flushTaskQueue, .{ q, comp });
+
+ link.doZcuTask(comp, tid, task);
}
-/// Enqueues all prelink tasks in `tasks`. Asserts that they were expected, i.e. that
-/// `prelink_wait_count` is not yet 0. Also asserts that `tasks.len` is not 0.
-pub fn enqueuePrelink(q: *Queue, comp: *Compilation, tasks: []const PrelinkTask) Allocator.Error!void {
- {
- q.mutex.lock();
- defer q.mutex.unlock();
- assert(q.prelink_wait_count > 0);
- try q.queued_prelink.appendSlice(comp.gpa, tasks);
- switch (q.state) {
- .wait_for_mir => unreachable, // we've not started zcu tasks yet
- .running => return,
- .finished => {},
+pub fn finishPrelinkQueue(q: *Queue, comp: *Compilation) void {
+ if (q.future != null) {
+ q.prelink_queue.close(comp.io);
+ return;
+ }
+ // If linking non-concurrently, we must run prelink.
+ prelink: {
+ const lf = comp.bin_file orelse break :prelink;
+ if (lf.post_prelink) break :prelink;
+
+ if (lf.prelink()) |_| {
+ lf.post_prelink = true;
+ } else |err| switch (err) {
+ error.OutOfMemory => comp.link_diags.setAllocFailure(),
+ error.LinkFailure => {},
}
- // Restart the linker thread, because it was waiting for a task
- q.state = .running;
}
- comp.thread_pool.spawnWgId(&comp.link_task_wait_group, flushTaskQueue, .{ q, comp });
}
-pub fn enqueueZcu(q: *Queue, comp: *Compilation, task: ZcuTask) Allocator.Error!void {
- assert(comp.separateCodegenThreadOk());
- {
- q.mutex.lock();
- defer q.mutex.unlock();
- // If this is a `link_func` task, we might need to wait for `air_bytes_in_flight` to fall.
- if (task == .link_func) {
- const max_in_flight = max_air_bytes_in_flight -| task.link_func.air_bytes;
- while (q.air_bytes_in_flight > max_in_flight) {
- q.air_bytes_waiting = task.link_func.air_bytes;
- q.air_bytes_cond.wait(&q.mutex);
- q.air_bytes_waiting = 0;
- }
- q.air_bytes_in_flight += task.link_func.air_bytes;
- }
- try q.queued_zcu.append(comp.gpa, task);
- switch (q.state) {
- .running, .wait_for_mir => return,
- .finished => if (q.prelink_wait_count > 0) return,
- }
- // Restart the linker thread, unless it would immediately be blocked
- if (task == .link_func and task.link_func.mir.status.load(.acquire) == .pending) {
- q.state = .{ .wait_for_mir = task.link_func.func };
- return;
- }
- q.state = .running;
+pub fn finishZcuQueue(q: *Queue, comp: *Compilation) void {
+ if (q.future != null) {
+ q.zcu_queue.close(comp.io);
}
- comp.thread_pool.spawnWgId(&comp.link_task_wait_group, flushTaskQueue, .{ q, comp });
}
-fn flushTaskQueue(tid: usize, q: *Queue, comp: *Compilation) void {
- q.flush_safety.lock(); // every `return` site should unlock this before unlocking `q.mutex`
- if (std.debug.runtime_safety) {
- q.mutex.lock();
- defer q.mutex.unlock();
- assert(q.state == .running);
- }
+fn runLinkTasks(q: *Queue, comp: *Compilation) void {
+ const tid = Compilation.getTid();
+ const io = comp.io;
var have_idle_tasks = true;
- prelink: while (true) {
- assert(q.wip_prelink.items.len == 0);
- swap_queues: while (true) {
- {
- q.mutex.lock();
- defer q.mutex.unlock();
- std.mem.swap(std.ArrayList(PrelinkTask), &q.queued_prelink, &q.wip_prelink);
- if (q.wip_prelink.items.len > 0) break :swap_queues;
- if (q.prelink_wait_count == 0) break :prelink; // prelink is done
- if (!have_idle_tasks) {
- // We're expecting more prelink tasks so can't move on to ZCU tasks.
- q.state = .finished;
- q.flush_safety.unlock();
- return;
- }
- }
- have_idle_tasks = link.doIdleTask(comp, tid) catch |err| switch (err) {
- error.OutOfMemory => have_idle_tasks: {
- comp.link_diags.setAllocFailure();
- break :have_idle_tasks false;
- },
- error.LinkFailure => false,
- };
- }
- for (q.wip_prelink.items) |task| {
+
+ prelink_tasks: while (true) {
+ var task_buf: [128]PrelinkTask = undefined;
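+ // When idle work is available, don't block on the queue (a limit of 0 may return zero tasks);
+ // otherwise, wait until at least one task arrives or the queue is closed.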
+ const limit: usize = if (have_idle_tasks) 0 else 1;
+ const n = q.prelink_queue.get(io, &task_buf, limit) catch |err| switch (err) {
+ error.Canceled => return,
+ error.Closed => break :prelink_tasks,
+ };
+ if (n == 0) {
+ assert(have_idle_tasks);
+ have_idle_tasks = runIdleTask(comp, tid);
+ } else for (task_buf[0..n]) |task| {
link.doPrelinkTask(comp, task);
+ have_idle_tasks = true;
}
- have_idle_tasks = true;
- q.wip_prelink.clearRetainingCapacity();
}
// We've finished the prelink tasks, so run prelink if necessary.
@@ -263,79 +180,37 @@ fn flushTaskQueue(tid: usize, q: *Queue, comp: *Compilation) void {
}
}
- // Now we can run ZCU tasks.
- while (true) {
- if (q.wip_zcu.items.len == q.wip_zcu_idx) swap_queues: {
- q.wip_zcu.clearRetainingCapacity();
- q.wip_zcu_idx = 0;
- while (true) {
- {
- q.mutex.lock();
- defer q.mutex.unlock();
- std.mem.swap(std.ArrayList(ZcuTask), &q.queued_zcu, &q.wip_zcu);
- if (q.wip_zcu.items.len > 0) break :swap_queues;
- if (!have_idle_tasks) {
- // We've exhausted all available tasks.
- q.state = .finished;
- q.flush_safety.unlock();
- return;
- }
- }
- have_idle_tasks = link.doIdleTask(comp, tid) catch |err| switch (err) {
- error.OutOfMemory => have_idle_tasks: {
- comp.link_diags.setAllocFailure();
- break :have_idle_tasks false;
- },
- error.LinkFailure => false,
- };
- }
- }
- const task = q.wip_zcu.items[q.wip_zcu_idx];
- // If the task is a `link_func`, we might have to stop until its MIR is populated.
- pending: {
- if (task != .link_func) break :pending;
- const status_ptr = &task.link_func.mir.status;
- while (true) {
- // First check without the mutex to optimize for the common case where MIR is ready.
- if (status_ptr.load(.acquire) != .pending) break :pending;
- if (have_idle_tasks) have_idle_tasks = link.doIdleTask(comp, tid) catch |err| switch (err) {
- error.OutOfMemory => have_idle_tasks: {
- comp.link_diags.setAllocFailure();
- break :have_idle_tasks false;
- },
- error.LinkFailure => false,
- };
- if (!have_idle_tasks) break;
- }
- q.mutex.lock();
- defer q.mutex.unlock();
- if (status_ptr.load(.acquire) != .pending) break :pending;
- // We will stop for now, and get restarted once this MIR is ready.
- q.state = .{ .wait_for_mir = task.link_func.func };
- q.flush_safety.unlock();
- return;
+ zcu_tasks: while (true) {
+ var task_buf: [128]ZcuTask = undefined;
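+ // As above: only block on the queue when there is no idle work to fall back on.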
+ const limit: usize = if (have_idle_tasks) 0 else 1;
+ const n = q.zcu_queue.get(io, &task_buf, limit) catch |err| switch (err) {
+ error.Canceled => return,
+ error.Closed => break :zcu_tasks,
+ };
+ if (n == 0) {
+ assert(have_idle_tasks);
+ have_idle_tasks = runIdleTask(comp, tid);
+ } else for (task_buf[0..n]) |task| {
+ link.doZcuTask(comp, tid, task);
+ have_idle_tasks = true;
}
- link.doZcuTask(comp, tid, task);
- task.deinit(comp.zcu.?);
- if (task == .link_func) {
- // Decrease `air_bytes_in_flight`, since we've finished processing this MIR.
- q.mutex.lock();
- defer q.mutex.unlock();
- q.air_bytes_in_flight -= task.link_func.air_bytes;
- if (q.air_bytes_waiting != 0 and
- q.air_bytes_in_flight <= max_air_bytes_in_flight -| q.air_bytes_waiting)
- {
- q.air_bytes_cond.signal();
- }
- }
- q.wip_zcu_idx += 1;
- have_idle_tasks = true;
}
}
+fn runIdleTask(comp: *Compilation, tid: usize) bool {
+ return link.doIdleTask(comp, tid) catch |err| switch (err) {
+ error.OutOfMemory => have_more: {
+ comp.link_diags.setAllocFailure();
+ break :have_more false;
+ },
+ error.LinkFailure => false,
+ };
+}
const std = @import("std");
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
+const Io = std.Io;
+
const Compilation = @import("../Compilation.zig");
const InternPool = @import("../InternPool.zig");
const link = @import("../link.zig");
diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig
index 3ee6e842de..92307ec40c 100644
--- a/src/link/Wasm.zig
+++ b/src/link/Wasm.zig
@@ -3393,10 +3393,11 @@ pub fn updateExports(
pub fn loadInput(wasm: *Wasm, input: link.Input) !void {
const comp = wasm.base.comp;
const gpa = comp.gpa;
+ const io = comp.io;
if (comp.verbose_link) {
- comp.mutex.lock(); // protect comp.arena
- defer comp.mutex.unlock();
+ comp.mutex.lockUncancelable(io); // protect comp.arena
+ defer comp.mutex.unlock(io);
const argv = &wasm.dump_argv_list;
switch (input) {
diff --git a/src/main.zig b/src/main.zig
index 2d64bf13c7..a897f2a847 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -11,7 +11,6 @@ const Allocator = mem.Allocator;
const Ast = std.zig.Ast;
const Color = std.zig.Color;
const warn = std.log.warn;
-const ThreadPool = std.Thread.Pool;
const cleanExit = std.process.cleanExit;
const Cache = std.Build.Cache;
const Path = std.Build.Cache.Path;
@@ -200,6 +199,8 @@ fn mainArgs(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
const tr = tracy.trace(@src());
defer tr.end();
+ Compilation.setMainThread();
+
if (args.len <= 1) {
std.log.info("{s}", .{usage});
fatal("expected command argument", .{});
@@ -239,6 +240,8 @@ fn mainArgs(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
var threaded: Io.Threaded = .init(gpa);
defer threaded.deinit();
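+ // Stash the implementation pointer so `setThreadLimit` can adjust its limits later.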
+ threaded_impl_ptr = &threaded;
+ threaded.stack_size = thread_stack_size;
const io = threaded.io();
const cmd = args[1];
@@ -3361,14 +3364,11 @@ fn buildOutputType(
},
};
- var thread_pool: ThreadPool = undefined;
- try thread_pool.init(.{
- .allocator = gpa,
- .n_jobs = @min(@max(n_jobs orelse std.Thread.getCpuCount() catch 1, 1), std.math.maxInt(Zcu.PerThread.IdBacking)),
- .track_ids = true,
- .stack_size = thread_stack_size,
- });
- defer thread_pool.deinit();
+ const thread_limit = @min(
+ @max(n_jobs orelse std.Thread.getCpuCount() catch 1, 1),
+ std.math.maxInt(Zcu.PerThread.IdBacking),
+ );
+ setThreadLimit(thread_limit);
for (create_module.c_source_files.items) |*src| {
dev.check(.c_compiler);
@@ -3461,7 +3461,7 @@ fn buildOutputType(
var create_diag: Compilation.CreateDiagnostic = undefined;
const comp = Compilation.create(gpa, arena, io, &create_diag, .{
.dirs = dirs,
- .thread_pool = &thread_pool,
+ .thread_limit = thread_limit,
.self_exe_path = switch (native_os) {
.wasi => null,
else => self_exe_path,
@@ -4150,6 +4150,7 @@ fn serve(
runtime_args_start: ?usize,
) !void {
const gpa = comp.gpa;
+ const io = comp.io;
var server = try Server.init(.{
.in = in,
@@ -4178,8 +4179,8 @@ fn serve(
const hdr = try server.receiveMessage();
// Lock the debug server while handling the message.
- if (comp.debugIncremental()) ids.mutex.lock();
- defer if (comp.debugIncremental()) ids.mutex.unlock();
+ if (comp.debugIncremental()) try ids.mutex.lock(io);
+ defer if (comp.debugIncremental()) ids.mutex.unlock(io);
switch (hdr.tag) {
.exit => return cleanExit(),
@@ -5140,14 +5141,11 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8)
child_argv.items[argv_index_global_cache_dir] = dirs.global_cache.path orelse cwd_path;
child_argv.items[argv_index_cache_dir] = dirs.local_cache.path orelse cwd_path;
- var thread_pool: ThreadPool = undefined;
- try thread_pool.init(.{
- .allocator = gpa,
- .n_jobs = @min(@max(n_jobs orelse std.Thread.getCpuCount() catch 1, 1), std.math.maxInt(Zcu.PerThread.IdBacking)),
- .track_ids = true,
- .stack_size = thread_stack_size,
- });
- defer thread_pool.deinit();
+ const thread_limit = @min(
+ @max(n_jobs orelse std.Thread.getCpuCount() catch 1, 1),
+ std.math.maxInt(Zcu.PerThread.IdBacking),
+ );
+ setThreadLimit(thread_limit);
// Dummy http client that is not actually used when fetch_command is unsupported.
// Prevents bootstrap from depending on a bunch of unnecessary stuff.
@@ -5376,7 +5374,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8)
.main_mod = build_mod,
.emit_bin = .yes_cache,
.self_exe_path = self_exe_path,
- .thread_pool = &thread_pool,
+ .thread_limit = thread_limit,
.verbose_cc = verbose_cc,
.verbose_link = verbose_link,
.verbose_air = verbose_air,
@@ -5548,14 +5546,11 @@ fn jitCmd(
);
defer dirs.deinit();
- var thread_pool: ThreadPool = undefined;
- try thread_pool.init(.{
- .allocator = gpa,
- .n_jobs = @min(@max(std.Thread.getCpuCount() catch 1, 1), std.math.maxInt(Zcu.PerThread.IdBacking)),
- .track_ids = true,
- .stack_size = thread_stack_size,
- });
- defer thread_pool.deinit();
+ const thread_limit = @min(
+ @max(std.Thread.getCpuCount() catch 1, 1),
+ std.math.maxInt(Zcu.PerThread.IdBacking),
+ );
+ setThreadLimit(thread_limit);
var child_argv: std.ArrayList([]const u8) = .empty;
try child_argv.ensureUnusedCapacity(arena, args.len + 4);
@@ -5619,7 +5614,7 @@ fn jitCmd(
.main_mod = root_mod,
.emit_bin = .yes_cache,
.self_exe_path = self_exe_path,
- .thread_pool = &thread_pool,
+ .thread_limit = thread_limit,
.cache_mode = .whole,
}) catch |err| switch (err) {
error.CreateFail => fatal("failed to create compilation: {f}", .{create_diag}),
@@ -6946,10 +6941,6 @@ fn cmdFetch(
const path_or_url = opt_path_or_url orelse fatal("missing url or path parameter", .{});
- var thread_pool: ThreadPool = undefined;
- try thread_pool.init(.{ .allocator = gpa });
- defer thread_pool.deinit();
-
var http_client: std.http.Client = .{ .allocator = gpa, .io = io };
defer http_client.deinit();
@@ -7601,3 +7592,14 @@ fn addLibDirectoryWarn2(
.path = path,
});
}
+
+var threaded_impl_ptr: *Io.Threaded = undefined;
+fn setThreadLimit(n: usize) void {
+ // We want a maximum of n total threads to keep the InternPool happy, but
+ // the main thread doesn't count towards the limits, so use n-1. Also, the
+ // linker can run concurrently, so we need to set both the async *and* the
+ // concurrency limit.
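+ // (When n == 1, the limit is zero; `Io.concurrent` is then expected to fail with
+ // `error.ConcurrencyUnavailable`, so link tasks run eagerly on the main thread.)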
+ const limit: Io.Limit = .limited(n - 1);
+ threaded_impl_ptr.setAsyncLimit(limit);
+ threaded_impl_ptr.concurrent_limit = limit;
+}
diff --git a/src/mutable_value.zig b/src/mutable_value.zig
index 0f2be00d42..c9eb993944 100644
--- a/src/mutable_value.zig
+++ b/src/mutable_value.zig
@@ -55,6 +55,9 @@ pub const MutableValue = union(enum) {
};
pub fn intern(mv: MutableValue, pt: Zcu.PerThread, arena: Allocator) Allocator.Error!Value {
+ const zcu = pt.zcu;
+ const comp = zcu.comp;
+ const io = comp.io;
return Value.fromInterned(switch (mv) {
.interned => |ip_index| ip_index,
.eu_payload => |sv| try pt.intern(.{ .error_union = .{
@@ -68,7 +71,7 @@ pub const MutableValue = union(enum) {
.repeated => |sv| return pt.aggregateSplatValue(.fromInterned(sv.ty), try sv.child.intern(pt, arena)),
.bytes => |b| try pt.intern(.{ .aggregate = .{
.ty = b.ty,
- .storage = .{ .bytes = try pt.zcu.intern_pool.getOrPutString(pt.zcu.gpa, pt.tid, b.data, .maybe_embedded_nulls) },
+ .storage = .{ .bytes = try zcu.intern_pool.getOrPutString(comp.gpa, io, pt.tid, b.data, .maybe_embedded_nulls) },
} }),
.aggregate => |a| {
const elems = try arena.alloc(InternPool.Index, a.elems.len);