path: root/src/Compilation.zig
author    Matthew Lugg <mlugg@mlugg.co.uk>  2024-07-11 23:27:13 +0100
committer GitHub <noreply@github.com>  2024-07-11 23:27:13 +0100
commit    80d7e260d78400b841f15e3350473650b87931a5 (patch)
tree      3035bae743da24cbe1d5046469fea0a5542a8829 /src/Compilation.zig
parent    45be80364659332807b527670514332a4b835f84 (diff)
parent    77810f288216ef3e35f3d0df4a04351297560a5e (diff)
Merge pull request #20570 from jacobly0/fix-races
InternPool: fix more races blocking a separate codegen/linker thread
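
The changes below thread a per-thread handle (Zcu.PerThread, which carries a .tid) through call sites that mutate the InternPool, so that a dedicated codegen/linker thread can run alongside the main thread without racing on shared tables. The sketch below is a minimal, self-contained illustration of that pattern; Tid, Pool, and PerThread are hypothetical stand-ins, not the compiler's actual definitions.

const std = @import("std");

// Illustrative stand-ins only; the real InternPool and Zcu.PerThread live in
// the Zig compiler sources and are considerably more involved.
const Tid = enum(u8) { main, codegen };

const Pool = struct {
    // One bucket per thread id: a thread only ever appends to its own bucket,
    // so concurrent inserts from different threads cannot race.
    buckets: [2]std.ArrayListUnmanaged(u32) = .{ .{}, .{} },

    fn track(pool: *Pool, gpa: std.mem.Allocator, tid: Tid, item: u32) !void {
        try pool.buckets[@intFromEnum(tid)].append(gpa, item);
    }
};

const PerThread = struct {
    pool: *Pool,
    tid: Tid,

    // Call sites hold a PerThread handle instead of the bare pool, so the
    // thread id is available wherever a mutation happens (mirroring how the
    // hunks below pass pt.tid into ip.trackZir).
    fn trackZir(pt: PerThread, gpa: std.mem.Allocator, inst: u32) !void {
        try pt.pool.track(gpa, pt.tid, inst);
    }
};

test "each thread appends only to its own bucket" {
    var pool: Pool = .{};
    defer {
        for (&pool.buckets) |*b| b.deinit(std.testing.allocator);
    }

    const pt: PerThread = .{ .pool = &pool, .tid = .main };
    try pt.trackZir(std.testing.allocator, 42);
    try std.testing.expectEqual(@as(usize, 1), pool.buckets[0].items.len);
}

The hunks that follow apply the same idea at existing call sites: ip.trackZir now receives pt.tid alongside the allocator.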
Diffstat (limited to 'src/Compilation.zig')
-rw-r--r--  src/Compilation.zig  129
1 file changed, 51 insertions, 78 deletions
diff --git a/src/Compilation.zig b/src/Compilation.zig
index 118e325ed7..cc5fd1a9eb 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -1877,6 +1877,7 @@ pub fn destroy(comp: *Compilation) void {
if (comp.module) |zcu| zcu.deinit();
comp.cache_use.deinit();
comp.work_queue.deinit();
+ if (!InternPool.single_threaded) comp.codegen_work.queue.deinit();
comp.c_object_work_queue.deinit();
if (!build_options.only_core_functionality) {
comp.win32_resource_work_queue.deinit();
@@ -2119,12 +2120,14 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
}
if (comp.module) |zcu| {
+ const pt: Zcu.PerThread = .{ .zcu = zcu, .tid = .main };
+
zcu.compile_log_text.shrinkAndFree(gpa, 0);
// Make sure std.zig is inside the import_table. We unconditionally need
// it for start.zig.
const std_mod = zcu.std_mod;
- _ = try zcu.importPkg(std_mod);
+ _ = try pt.importPkg(std_mod);
// Normally we rely on importing std to in turn import the root source file
// in the start code, but when using the stage1 backend that won't happen,
@@ -2133,20 +2136,19 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
// Likewise, in the case of `zig test`, the test runner is the root source file,
// and so there is nothing to import the main file.
if (comp.config.is_test) {
- _ = try zcu.importPkg(zcu.main_mod);
+ _ = try pt.importPkg(zcu.main_mod);
}
if (zcu.root_mod.deps.get("compiler_rt")) |compiler_rt_mod| {
- _ = try zcu.importPkg(compiler_rt_mod);
+ _ = try pt.importPkg(compiler_rt_mod);
}
// Put a work item in for every known source file to detect if
// it changed, and, if so, re-compute ZIR and then queue the job
// to update it.
try comp.astgen_work_queue.ensureUnusedCapacity(zcu.import_table.count());
- for (zcu.import_table.values(), 0..) |file, file_index_usize| {
- const file_index: Zcu.File.Index = @enumFromInt(file_index_usize);
- if (file.mod.isBuiltin()) continue;
+ for (zcu.import_table.values()) |file_index| {
+ if (zcu.fileByIndex(file_index).mod.isBuiltin()) continue;
comp.astgen_work_queue.writeItemAssumeCapacity(file_index);
}
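
For context on the hunk above: zcu.import_table now stores Zcu.File.Index values rather than *Zcu.File pointers, and files are looked up through zcu.fileByIndex. Here is a rough sketch of that index-keyed table pattern, using hypothetical FileIndex/FileTable types that only approximate the real data structures:

const std = @import("std");

// Hypothetical index-keyed table: handing out a stable enum index instead of
// a pointer means other code (or other threads) can hold the index safely
// while the backing storage is managed in one place.
const FileIndex = enum(u32) { _ };

const FileTable = struct {
    files: std.ArrayListUnmanaged([]const u8) = .{},

    fn add(t: *FileTable, gpa: std.mem.Allocator, path: []const u8) !FileIndex {
        const idx: FileIndex = @enumFromInt(t.files.items.len);
        try t.files.append(gpa, path);
        return idx;
    }

    fn byIndex(t: *const FileTable, idx: FileIndex) []const u8 {
        return t.files.items[@intFromEnum(idx)];
    }
};

test "indices remain meaningful after more files are added" {
    var table: FileTable = .{};
    defer table.files.deinit(std.testing.allocator);

    const idx = try table.add(std.testing.allocator, "std.zig");
    _ = try table.add(std.testing.allocator, "start.zig");
    try std.testing.expectEqualStrings("std.zig", table.byIndex(idx));
}

Handing out a small, stable index instead of a pointer keeps cross-thread references cheap to copy and leaves the owning side free to decide where the backing storage lives.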
@@ -2641,7 +2643,8 @@ fn resolveEmitLoc(
return slice.ptr;
}
-fn reportMultiModuleErrors(zcu: *Zcu) !void {
+fn reportMultiModuleErrors(pt: Zcu.PerThread) !void {
+ const zcu = pt.zcu;
const gpa = zcu.gpa;
const ip = &zcu.intern_pool;
// Some cases can give you a whole bunch of multi-module errors, which it's not helpful to
@@ -2651,14 +2654,13 @@ fn reportMultiModuleErrors(zcu: *Zcu) !void {
// Attach the "some omitted" note to the final error message
var last_err: ?*Zcu.ErrorMsg = null;
- for (zcu.import_table.values(), 0..) |file, file_index_usize| {
+ for (zcu.import_table.values()) |file_index| {
+ const file = zcu.fileByIndex(file_index);
if (!file.multi_pkg) continue;
num_errors += 1;
if (num_errors > max_errors) continue;
- const file_index: Zcu.File.Index = @enumFromInt(file_index_usize);
-
const err = err_blk: {
// Like with errors, let's cap the number of notes to prevent a huge error spew.
const max_notes = 5;
@@ -2674,7 +2676,10 @@ fn reportMultiModuleErrors(zcu: *Zcu) !void {
.import => |import| try Zcu.ErrorMsg.init(
gpa,
.{
- .base_node_inst = try ip.trackZir(gpa, import.file, .main_struct_inst),
+ .base_node_inst = try ip.trackZir(gpa, pt.tid, .{
+ .file = import.file,
+ .inst = .main_struct_inst,
+ }),
.offset = .{ .token_abs = import.token },
},
"imported from module {s}",
@@ -2683,7 +2688,10 @@ fn reportMultiModuleErrors(zcu: *Zcu) !void {
.root => |pkg| try Zcu.ErrorMsg.init(
gpa,
.{
- .base_node_inst = try ip.trackZir(gpa, file_index, .main_struct_inst),
+ .base_node_inst = try ip.trackZir(gpa, pt.tid, .{
+ .file = file_index,
+ .inst = .main_struct_inst,
+ }),
.offset = .entire_file,
},
"root of module {s}",
@@ -2697,7 +2705,10 @@ fn reportMultiModuleErrors(zcu: *Zcu) !void {
notes[num_notes] = try Zcu.ErrorMsg.init(
gpa,
.{
- .base_node_inst = try ip.trackZir(gpa, file_index, .main_struct_inst),
+ .base_node_inst = try ip.trackZir(gpa, pt.tid, .{
+ .file = file_index,
+ .inst = .main_struct_inst,
+ }),
.offset = .entire_file,
},
"{} more references omitted",
@@ -2709,7 +2720,10 @@ fn reportMultiModuleErrors(zcu: *Zcu) !void {
const err = try Zcu.ErrorMsg.create(
gpa,
.{
- .base_node_inst = try ip.trackZir(gpa, file_index, .main_struct_inst),
+ .base_node_inst = try ip.trackZir(gpa, pt.tid, .{
+ .file = file_index,
+ .inst = .main_struct_inst,
+ }),
.offset = .entire_file,
},
"file exists in multiple modules",
@@ -2749,8 +2763,9 @@ fn reportMultiModuleErrors(zcu: *Zcu) !void {
// to add this flag after reporting the errors however, as otherwise
// we'd get an error for every single downstream file, which wouldn't be
// very useful.
- for (zcu.import_table.values()) |file| {
- if (file.multi_pkg) file.recursiveMarkMultiPkg(zcu);
+ for (zcu.import_table.values()) |file_index| {
+ const file = zcu.fileByIndex(file_index);
+ if (file.multi_pkg) file.recursiveMarkMultiPkg(pt);
}
}
@@ -2774,7 +2789,7 @@ const Header = extern struct {
//extra_len: u32,
//limbs_len: u32,
//string_bytes_len: u32,
- tracked_insts_len: u32,
+ //tracked_insts_len: u32,
src_hash_deps_len: u32,
decl_val_deps_len: u32,
namespace_deps_len: u32,
@@ -2782,7 +2797,7 @@ const Header = extern struct {
first_dependency_len: u32,
dep_entries_len: u32,
free_dep_entries_len: u32,
- files_len: u32,
+ //files_len: u32,
},
};
@@ -2803,7 +2818,7 @@ pub fn saveState(comp: *Compilation) !void {
//.extra_len = @intCast(ip.extra.items.len),
//.limbs_len = @intCast(ip.limbs.items.len),
//.string_bytes_len = @intCast(ip.string_bytes.items.len),
- .tracked_insts_len = @intCast(ip.tracked_insts.count()),
+ //.tracked_insts_len = @intCast(ip.tracked_insts.count()),
.src_hash_deps_len = @intCast(ip.src_hash_deps.count()),
.decl_val_deps_len = @intCast(ip.decl_val_deps.count()),
.namespace_deps_len = @intCast(ip.namespace_deps.count()),
@@ -2811,7 +2826,7 @@ pub fn saveState(comp: *Compilation) !void {
.first_dependency_len = @intCast(ip.first_dependency.count()),
.dep_entries_len = @intCast(ip.dep_entries.items.len),
.free_dep_entries_len = @intCast(ip.free_dep_entries.items.len),
- .files_len = @intCast(ip.files.entries.len),
+ //.files_len = @intCast(ip.files.entries.len),
},
};
addBuf(&bufs_list, &bufs_len, mem.asBytes(&header));
@@ -2820,7 +2835,7 @@ pub fn saveState(comp: *Compilation) !void {
//addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.items.items(.data)));
//addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.items.items(.tag)));
//addBuf(&bufs_list, &bufs_len, ip.string_bytes.items);
- addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.tracked_insts.keys()));
+ //addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.tracked_insts.keys()));
addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.src_hash_deps.keys()));
addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.src_hash_deps.values()));
@@ -2836,8 +2851,8 @@ pub fn saveState(comp: *Compilation) !void {
addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.dep_entries.items));
addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.free_dep_entries.items));
- addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.files.keys()));
- addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.files.values()));
+ //addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.files.keys()));
+ //addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.files.values()));
// TODO: compilation errors
// TODO: namespaces
@@ -2929,7 +2944,7 @@ pub fn totalErrorCount(comp: *Compilation) u32 {
}
}
- if (zcu.global_error_set.entries.len - 1 > zcu.error_limit) {
+ if (zcu.intern_pool.global_error_set.mutate.list.len > zcu.error_limit) {
total += 1;
}
}
@@ -3058,7 +3073,7 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle {
try addModuleErrorMsg(zcu, &bundle, value.*, &all_references);
}
- const actual_error_count = zcu.global_error_set.entries.len - 1;
+ const actual_error_count = zcu.intern_pool.global_error_set.mutate.list.len;
if (actual_error_count > zcu.error_limit) {
try bundle.addRootErrorMessage(.{
.msg = try bundle.printString("ZCU used more errors than possible: used {d}, max {d}", .{
@@ -3443,11 +3458,12 @@ fn performAllTheWorkInner(
}
}
- if (comp.module) |mod| {
- try reportMultiModuleErrors(mod);
- try mod.flushRetryableFailures();
- mod.sema_prog_node = main_progress_node.start("Semantic Analysis", 0);
- mod.codegen_prog_node = main_progress_node.start("Code Generation", 0);
+ if (comp.module) |zcu| {
+ const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = .main };
+ try reportMultiModuleErrors(pt);
+ try zcu.flushRetryableFailures();
+ zcu.sema_prog_node = main_progress_node.start("Semantic Analysis", 0);
+ zcu.codegen_prog_node = main_progress_node.start("Code Generation", 0);
}
if (!InternPool.single_threaded) comp.thread_pool.spawnWgId(&comp.work_queue_wait_group, codegenThread, .{comp});
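
The spawn on the line above is what these race fixes are for: when InternPool is built multi-threaded, a long-lived codegen worker drains work produced by the main thread. Below is a simplified producer/consumer sketch of that shape, assuming a mutex/condition-guarded queue; the real comp.codegen_work and codegenThread are defined elsewhere in the compiler and differ in detail.

const std = @import("std");

// Hypothetical job queue; the compiler's real codegen work queue differs.
const Queue = struct {
    mutex: std.Thread.Mutex = .{},
    cond: std.Thread.Condition = .{},
    items: std.ArrayListUnmanaged(u32) = .{},
    closed: bool = false,

    fn push(q: *Queue, gpa: std.mem.Allocator, item: u32) !void {
        q.mutex.lock();
        defer q.mutex.unlock();
        try q.items.append(gpa, item);
        q.cond.signal();
    }

    fn close(q: *Queue) void {
        q.mutex.lock();
        defer q.mutex.unlock();
        q.closed = true;
        q.cond.broadcast();
    }

    // Blocks until an item is available, or returns null once the queue is
    // closed and drained.
    fn pop(q: *Queue) ?u32 {
        q.mutex.lock();
        defer q.mutex.unlock();
        while (q.items.items.len == 0) {
            if (q.closed) return null;
            q.cond.wait(&q.mutex);
        }
        return q.items.orderedRemove(0);
    }
};

fn codegenWorker(q: *Queue) void {
    // Drain jobs produced by the main thread until the queue is closed.
    while (q.pop()) |job| {
        std.debug.print("codegen job {d}\n", .{job});
    }
}

pub fn main() !void {
    var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa_state.deinit();
    const gpa = gpa_state.allocator();

    var queue: Queue = .{};
    defer queue.items.deinit(gpa);

    const worker = try std.Thread.spawn(.{}, codegenWorker, .{&queue});
    for (0..3) |i| try queue.push(gpa, @intCast(i));
    queue.close();
    worker.join();
}

The close/broadcast pair lets the worker exit cleanly once the last job has been queued, a rough stand-in for however the real codegen thread is told to shut down.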
@@ -4131,14 +4147,6 @@ fn workerDocsWasmFallible(comp: *Compilation, prog_node: std.Progress.Node) anye
};
}
-const AstGenSrc = union(enum) {
- root,
- import: struct {
- importing_file: Zcu.File.Index,
- import_tok: std.zig.Ast.TokenIndex,
- },
-};
-
fn workerAstGenFile(
tid: usize,
comp: *Compilation,
@@ -4148,7 +4156,7 @@ fn workerAstGenFile(
root_decl: Zcu.Decl.OptionalIndex,
prog_node: std.Progress.Node,
wg: *WaitGroup,
- src: AstGenSrc,
+ src: Zcu.AstGenSrc,
) void {
const child_prog_node = prog_node.start(file.sub_file_path, 0);
defer child_prog_node.end();
@@ -4158,7 +4166,7 @@ fn workerAstGenFile(
error.AnalysisFail => return,
else => {
file.status = .retryable_failure;
- comp.reportRetryableAstGenError(src, file_index, err) catch |oom| switch (oom) {
+ pt.reportRetryableAstGenError(src, file_index, err) catch |oom| switch (oom) {
// Swallowing this error is OK because it's implied to be OOM when
// there is a missing `failed_files` error message.
error.OutOfMemory => {},
@@ -4189,9 +4197,9 @@ fn workerAstGenFile(
comp.mutex.lock();
defer comp.mutex.unlock();
- const res = pt.zcu.importFile(file, import_path) catch continue;
+ const res = pt.importFile(file, import_path) catch continue;
if (!res.is_pkg) {
- res.file.addReference(pt.zcu.*, .{ .import = .{
+ res.file.addReference(pt.zcu, .{ .import = .{
.file = file_index,
.token = item.data.token,
} }) catch continue;
@@ -4204,7 +4212,7 @@ fn workerAstGenFile(
log.debug("AstGen of {s} has import '{s}'; queuing AstGen of {s}", .{
file.sub_file_path, import_path, import_result.file.sub_file_path,
});
- const sub_src: AstGenSrc = .{ .import = .{
+ const sub_src: Zcu.AstGenSrc = .{ .import = .{
.importing_file = file_index,
.import_tok = item.data.token,
} };
@@ -4557,41 +4565,6 @@ fn reportRetryableWin32ResourceError(
}
}
-fn reportRetryableAstGenError(
- comp: *Compilation,
- src: AstGenSrc,
- file_index: Zcu.File.Index,
- err: anyerror,
-) error{OutOfMemory}!void {
- const zcu = comp.module.?;
- const gpa = zcu.gpa;
-
- const file = zcu.fileByIndex(file_index);
- file.status = .retryable_failure;
-
- const src_loc: Zcu.LazySrcLoc = switch (src) {
- .root => .{
- .base_node_inst = try zcu.intern_pool.trackZir(gpa, file_index, .main_struct_inst),
- .offset = .entire_file,
- },
- .import => |info| .{
- .base_node_inst = try zcu.intern_pool.trackZir(gpa, info.importing_file, .main_struct_inst),
- .offset = .{ .token_abs = info.import_tok },
- },
- };
-
- const err_msg = try Zcu.ErrorMsg.create(gpa, src_loc, "unable to load '{}{s}': {s}", .{
- file.mod.root, file.sub_file_path, @errorName(err),
- });
- errdefer err_msg.destroy(gpa);
-
- {
- comp.mutex.lock();
- defer comp.mutex.unlock();
- try zcu.failed_files.putNoClobber(gpa, file, err_msg);
- }
-}
-
fn reportRetryableEmbedFileError(
comp: *Compilation,
embed_file: *Zcu.EmbedFile,