author     Jacob Young <jacobly0@users.noreply.github.com>  2024-06-15 19:57:47 -0400
committer  Jacob Young <jacobly0@users.noreply.github.com>  2024-07-07 22:59:52 -0400
commit     ca02266157ee72e41068672c8ca6f928fcbf6fdf (patch)
tree       d827ad6e5d0d311c4fca7fa83a32a98d3d201ac4 /src/Zcu/PerThread.zig
parent     525f341f33af9b8aad53931fd5511f00a82cb090 (diff)
download   zig-ca02266157ee72e41068672c8ca6f928fcbf6fdf.tar.gz
           zig-ca02266157ee72e41068672c8ca6f928fcbf6fdf.zip
Zcu: pass `PerThread` to intern pool string functions
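
The change is mechanical at each call site: the intern pool string helpers
(getOrPutString, getOrPutStringOpt, getOrPutStringFmt, getOrPutTrailingString)
now take the calling thread's id from the `Zcu.PerThread` handle. A minimal
before/after sketch of the pattern, based on the hunks below (illustrative
only, not an extra change in this commit):

    // before
    const name = try ip.getOrPutString(gpa, "builtin", .no_embedded_nulls);
    // after: the thread id from the PerThread handle is threaded through
    const name = try ip.getOrPutString(gpa, pt.tid, "builtin", .no_embedded_nulls);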
Diffstat (limited to 'src/Zcu/PerThread.zig')
-rw-r--r--  src/Zcu/PerThread.zig  725
1 file changed, 705 insertions(+), 20 deletions(-)
diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig
index 785a5d52e0..8cf6922345 100644
--- a/src/Zcu/PerThread.zig
+++ b/src/Zcu/PerThread.zig
@@ -5,6 +5,411 @@ tid: Id,
pub const Id = if (builtin.single_threaded) enum { main } else enum(usize) { main, _ };
+pub fn astGenFile(
+ pt: Zcu.PerThread,
+ file: *Zcu.File,
+ /// This parameter is provided separately from `file` because it is not
+ /// safe to access `import_table` without a lock, and this index is needed
+ /// in the call to `updateZirRefs`.
+ file_index: Zcu.File.Index,
+ path_digest: Cache.BinDigest,
+ opt_root_decl: Zcu.Decl.OptionalIndex,
+) !void {
+ assert(!file.mod.isBuiltin());
+
+ const tracy = trace(@src());
+ defer tracy.end();
+
+ const zcu = pt.zcu;
+ const comp = zcu.comp;
+ const gpa = zcu.gpa;
+
+ // In any case we need to examine the stat of the file to determine the course of action.
+ var source_file = try file.mod.root.openFile(file.sub_file_path, .{});
+ defer source_file.close();
+
+ const stat = try source_file.stat();
+
+ const want_local_cache = file.mod == zcu.main_mod;
+ const hex_digest = Cache.binToHex(path_digest);
+ const cache_directory = if (want_local_cache) zcu.local_zir_cache else zcu.global_zir_cache;
+ const zir_dir = cache_directory.handle;
+
+ // Determine whether we need to reload the file from disk and redo parsing and AstGen.
+ var lock: std.fs.File.Lock = switch (file.status) {
+ .never_loaded, .retryable_failure => lock: {
+ // First, load the cached ZIR code, if any.
+ log.debug("AstGen checking cache: {s} (local={}, digest={s})", .{
+ file.sub_file_path, want_local_cache, &hex_digest,
+ });
+
+ break :lock .shared;
+ },
+ .parse_failure, .astgen_failure, .success_zir => lock: {
+ const unchanged_metadata =
+ stat.size == file.stat.size and
+ stat.mtime == file.stat.mtime and
+ stat.inode == file.stat.inode;
+
+ if (unchanged_metadata) {
+ log.debug("unmodified metadata of file: {s}", .{file.sub_file_path});
+ return;
+ }
+
+ log.debug("metadata changed: {s}", .{file.sub_file_path});
+
+ break :lock .exclusive;
+ },
+ };
+
+ // We ask for a lock in order to coordinate with other zig processes.
+ // If another process is already working on this file, we will get the cached
+ // version. Likewise if we're working on AstGen and another process asks for
+ // the cached file, they'll get it.
+ const cache_file = while (true) {
+ break zir_dir.createFile(&hex_digest, .{
+ .read = true,
+ .truncate = false,
+ .lock = lock,
+ }) catch |err| switch (err) {
+ error.NotDir => unreachable, // no dir components
+ error.InvalidUtf8 => unreachable, // it's a hex encoded name
+ error.InvalidWtf8 => unreachable, // it's a hex encoded name
+ error.BadPathName => unreachable, // it's a hex encoded name
+ error.NameTooLong => unreachable, // it's a fixed size name
+ error.PipeBusy => unreachable, // it's not a pipe
+ error.WouldBlock => unreachable, // not asking for non-blocking I/O
+ // There are no dir components, so you would think that this was
+ // unreachable, however we have observed on macOS two processes racing
+ // to do openat() with O_CREAT manifest in ENOENT.
+ error.FileNotFound => continue,
+
+ else => |e| return e, // Retryable errors are handled at callsite.
+ };
+ };
+ defer cache_file.close();
+
+ while (true) {
+ update: {
+ // First we read the header to determine the lengths of arrays.
+ const header = cache_file.reader().readStruct(Zir.Header) catch |err| switch (err) {
+ // This can happen if Zig bails out of this function between creating
+ // the cached file and writing it.
+ error.EndOfStream => break :update,
+ else => |e| return e,
+ };
+ const unchanged_metadata =
+ stat.size == header.stat_size and
+ stat.mtime == header.stat_mtime and
+ stat.inode == header.stat_inode;
+
+ if (!unchanged_metadata) {
+ log.debug("AstGen cache stale: {s}", .{file.sub_file_path});
+ break :update;
+ }
+ log.debug("AstGen cache hit: {s} instructions_len={d}", .{
+ file.sub_file_path, header.instructions_len,
+ });
+
+ file.zir = Zcu.loadZirCacheBody(gpa, header, cache_file) catch |err| switch (err) {
+ error.UnexpectedFileSize => {
+ log.warn("unexpected EOF reading cached ZIR for {s}", .{file.sub_file_path});
+ break :update;
+ },
+ else => |e| return e,
+ };
+ file.zir_loaded = true;
+ file.stat = .{
+ .size = header.stat_size,
+ .inode = header.stat_inode,
+ .mtime = header.stat_mtime,
+ };
+ file.status = .success_zir;
+ log.debug("AstGen cached success: {s}", .{file.sub_file_path});
+
+ // TODO don't report compile errors until Sema @importFile
+ if (file.zir.hasCompileErrors()) {
+ {
+ comp.mutex.lock();
+ defer comp.mutex.unlock();
+ try zcu.failed_files.putNoClobber(gpa, file, null);
+ }
+ file.status = .astgen_failure;
+ return error.AnalysisFail;
+ }
+ return;
+ }
+
+ // If we already have the exclusive lock then it is our job to update.
+ if (builtin.os.tag == .wasi or lock == .exclusive) break;
+ // Otherwise, unlock to give someone a chance to get the exclusive lock
+ // and then upgrade to an exclusive lock.
+ cache_file.unlock();
+ lock = .exclusive;
+ try cache_file.lock(lock);
+ }
+
+ // The cache is definitely stale so delete the contents to avoid an underwrite later.
+ cache_file.setEndPos(0) catch |err| switch (err) {
+ error.FileTooBig => unreachable, // 0 is not too big
+
+ else => |e| return e,
+ };
+
+ pt.lockAndClearFileCompileError(file);
+
+ // If the previous ZIR does not have compile errors, keep it around
+ // in case parsing or new ZIR fails. In case of successful ZIR update
+ // at the end of this function we will free it.
+ // We keep the previous ZIR loaded so that we can use it
+ // for the update next time it does not have any compile errors. This avoids
+ // needlessly tossing out semantic analysis work when an error is
+ // temporarily introduced.
+ if (file.zir_loaded and !file.zir.hasCompileErrors()) {
+ assert(file.prev_zir == null);
+ const prev_zir_ptr = try gpa.create(Zir);
+ file.prev_zir = prev_zir_ptr;
+ prev_zir_ptr.* = file.zir;
+ file.zir = undefined;
+ file.zir_loaded = false;
+ }
+ file.unload(gpa);
+
+ if (stat.size > std.math.maxInt(u32))
+ return error.FileTooBig;
+
+ const source = try gpa.allocSentinel(u8, @as(usize, @intCast(stat.size)), 0);
+ defer if (!file.source_loaded) gpa.free(source);
+ const amt = try source_file.readAll(source);
+ if (amt != stat.size)
+ return error.UnexpectedEndOfFile;
+
+ file.stat = .{
+ .size = stat.size,
+ .inode = stat.inode,
+ .mtime = stat.mtime,
+ };
+ file.source = source;
+ file.source_loaded = true;
+
+ file.tree = try Ast.parse(gpa, source, .zig);
+ file.tree_loaded = true;
+
+ // Any potential AST errors are converted to ZIR errors here.
+ file.zir = try AstGen.generate(gpa, file.tree);
+ file.zir_loaded = true;
+ file.status = .success_zir;
+ log.debug("AstGen fresh success: {s}", .{file.sub_file_path});
+
+ const safety_buffer = if (Zcu.data_has_safety_tag)
+ try gpa.alloc([8]u8, file.zir.instructions.len)
+ else
+ undefined;
+ defer if (Zcu.data_has_safety_tag) gpa.free(safety_buffer);
+ const data_ptr = if (Zcu.data_has_safety_tag)
+ if (file.zir.instructions.len == 0)
+ @as([*]const u8, undefined)
+ else
+ @as([*]const u8, @ptrCast(safety_buffer.ptr))
+ else
+ @as([*]const u8, @ptrCast(file.zir.instructions.items(.data).ptr));
+ if (Zcu.data_has_safety_tag) {
+ // The `Data` union has a safety tag but in the file format we store it without.
+ for (file.zir.instructions.items(.data), 0..) |*data, i| {
+ const as_struct: *const Zcu.HackDataLayout = @ptrCast(data);
+ safety_buffer[i] = as_struct.data;
+ }
+ }
+
+ const header: Zir.Header = .{
+ .instructions_len = @as(u32, @intCast(file.zir.instructions.len)),
+ .string_bytes_len = @as(u32, @intCast(file.zir.string_bytes.len)),
+ .extra_len = @as(u32, @intCast(file.zir.extra.len)),
+
+ .stat_size = stat.size,
+ .stat_inode = stat.inode,
+ .stat_mtime = stat.mtime,
+ };
+ var iovecs = [_]std.posix.iovec_const{
+ .{
+ .base = @as([*]const u8, @ptrCast(&header)),
+ .len = @sizeOf(Zir.Header),
+ },
+ .{
+ .base = @as([*]const u8, @ptrCast(file.zir.instructions.items(.tag).ptr)),
+ .len = file.zir.instructions.len,
+ },
+ .{
+ .base = data_ptr,
+ .len = file.zir.instructions.len * 8,
+ },
+ .{
+ .base = file.zir.string_bytes.ptr,
+ .len = file.zir.string_bytes.len,
+ },
+ .{
+ .base = @as([*]const u8, @ptrCast(file.zir.extra.ptr)),
+ .len = file.zir.extra.len * 4,
+ },
+ };
+ cache_file.writevAll(&iovecs) catch |err| {
+ log.warn("unable to write cached ZIR code for {}{s} to {}{s}: {s}", .{
+ file.mod.root, file.sub_file_path, cache_directory, &hex_digest, @errorName(err),
+ });
+ };
+
+ if (file.zir.hasCompileErrors()) {
+ {
+ comp.mutex.lock();
+ defer comp.mutex.unlock();
+ try zcu.failed_files.putNoClobber(gpa, file, null);
+ }
+ file.status = .astgen_failure;
+ return error.AnalysisFail;
+ }
+
+ if (file.prev_zir) |prev_zir| {
+ try pt.updateZirRefs(file, file_index, prev_zir.*);
+ // No need to keep previous ZIR.
+ prev_zir.deinit(gpa);
+ gpa.destroy(prev_zir);
+ file.prev_zir = null;
+ }
+
+ if (opt_root_decl.unwrap()) |root_decl| {
+ // The root of this file must be re-analyzed, since the file has changed.
+ comp.mutex.lock();
+ defer comp.mutex.unlock();
+
+ log.debug("outdated root Decl: {}", .{root_decl});
+ try zcu.outdated_file_root.put(gpa, root_decl, {});
+ }
+}
+
+/// This is called from the AstGen thread pool, so must acquire
+/// the Compilation mutex when acting on shared state.
+fn updateZirRefs(pt: Zcu.PerThread, file: *Zcu.File, file_index: Zcu.File.Index, old_zir: Zir) !void {
+ const zcu = pt.zcu;
+ const gpa = zcu.gpa;
+ const new_zir = file.zir;
+
+ var inst_map: std.AutoHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index) = .{};
+ defer inst_map.deinit(gpa);
+
+ try Zcu.mapOldZirToNew(gpa, old_zir, new_zir, &inst_map);
+
+ const old_tag = old_zir.instructions.items(.tag);
+ const old_data = old_zir.instructions.items(.data);
+
+ // TODO: this should be done after all AstGen workers complete, to avoid
+ // iterating over this full set for every updated file.
+ for (zcu.intern_pool.tracked_insts.keys(), 0..) |*ti, idx_raw| {
+ const ti_idx: InternPool.TrackedInst.Index = @enumFromInt(idx_raw);
+ if (ti.file != file_index) continue;
+ const old_inst = ti.inst;
+ ti.inst = inst_map.get(ti.inst) orelse {
+ // Tracking failed for this instruction. Invalidate associated `src_hash` deps.
+ zcu.comp.mutex.lock();
+ defer zcu.comp.mutex.unlock();
+ log.debug("tracking failed for %{d}", .{old_inst});
+ try zcu.markDependeeOutdated(.{ .src_hash = ti_idx });
+ continue;
+ };
+
+ if (old_zir.getAssociatedSrcHash(old_inst)) |old_hash| hash_changed: {
+ if (new_zir.getAssociatedSrcHash(ti.inst)) |new_hash| {
+ if (std.zig.srcHashEql(old_hash, new_hash)) {
+ break :hash_changed;
+ }
+ log.debug("hash for (%{d} -> %{d}) changed: {} -> {}", .{
+ old_inst,
+ ti.inst,
+ std.fmt.fmtSliceHexLower(&old_hash),
+ std.fmt.fmtSliceHexLower(&new_hash),
+ });
+ }
+ // The source hash associated with this instruction changed - invalidate relevant dependencies.
+ zcu.comp.mutex.lock();
+ defer zcu.comp.mutex.unlock();
+ try zcu.markDependeeOutdated(.{ .src_hash = ti_idx });
+ }
+
+ // If this is a `struct_decl` etc, we must invalidate any outdated namespace dependencies.
+ const has_namespace = switch (old_tag[@intFromEnum(old_inst)]) {
+ .extended => switch (old_data[@intFromEnum(old_inst)].extended.opcode) {
+ .struct_decl, .union_decl, .opaque_decl, .enum_decl => true,
+ else => false,
+ },
+ else => false,
+ };
+ if (!has_namespace) continue;
+
+ var old_names: std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void) = .{};
+ defer old_names.deinit(zcu.gpa);
+ {
+ var it = old_zir.declIterator(old_inst);
+ while (it.next()) |decl_inst| {
+ const decl_name = old_zir.getDeclaration(decl_inst)[0].name;
+ switch (decl_name) {
+ .@"comptime", .@"usingnamespace", .unnamed_test, .decltest => continue,
+ _ => if (decl_name.isNamedTest(old_zir)) continue,
+ }
+ const name_zir = decl_name.toString(old_zir).?;
+ const name_ip = try zcu.intern_pool.getOrPutString(
+ zcu.gpa,
+ pt.tid,
+ old_zir.nullTerminatedString(name_zir),
+ .no_embedded_nulls,
+ );
+ try old_names.put(zcu.gpa, name_ip, {});
+ }
+ }
+ var any_change = false;
+ {
+ var it = new_zir.declIterator(ti.inst);
+ while (it.next()) |decl_inst| {
+            const decl_name = new_zir.getDeclaration(decl_inst)[0].name;
+            switch (decl_name) {
+                .@"comptime", .@"usingnamespace", .unnamed_test, .decltest => continue,
+                _ => if (decl_name.isNamedTest(new_zir)) continue,
+            }
+            const name_zir = decl_name.toString(new_zir).?;
+            const name_ip = try zcu.intern_pool.getOrPutString(
+                zcu.gpa,
+                pt.tid,
+                new_zir.nullTerminatedString(name_zir),
+ .no_embedded_nulls,
+ );
+ if (!old_names.swapRemove(name_ip)) continue;
+ // Name added
+ any_change = true;
+ zcu.comp.mutex.lock();
+ defer zcu.comp.mutex.unlock();
+ try zcu.markDependeeOutdated(.{ .namespace_name = .{
+ .namespace = ti_idx,
+ .name = name_ip,
+ } });
+ }
+ }
+ // The only elements remaining in `old_names` now are any names which were removed.
+ for (old_names.keys()) |name_ip| {
+ any_change = true;
+ zcu.comp.mutex.lock();
+ defer zcu.comp.mutex.unlock();
+ try zcu.markDependeeOutdated(.{ .namespace_name = .{
+ .namespace = ti_idx,
+ .name = name_ip,
+ } });
+ }
+
+ if (any_change) {
+ zcu.comp.mutex.lock();
+ defer zcu.comp.mutex.unlock();
+ try zcu.markDependeeOutdated(.{ .namespace = ti_idx });
+ }
+ }
+}
+
/// Like `ensureDeclAnalyzed`, but the Decl is a file's root Decl.
pub fn ensureFileAnalyzed(pt: Zcu.PerThread, file_index: Zcu.File.Index) Zcu.SemaError!void {
if (pt.zcu.fileRootDecl(file_index).unwrap()) |existing_root| {
@@ -91,7 +496,7 @@ pub fn ensureDeclAnalyzed(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) Zcu.Sem
};
}
- const decl_prog_node = mod.sema_prog_node.start((try decl.fullyQualifiedName(mod)).toSlice(ip), 0);
+ const decl_prog_node = mod.sema_prog_node.start((try decl.fullyQualifiedName(pt)).toSlice(ip), 0);
defer decl_prog_node.end();
break :blk pt.semaDecl(decl_index) catch |err| switch (err) {
@@ -290,7 +695,7 @@ pub fn linkerUpdateFunc(pt: Zcu.PerThread, func_index: InternPool.Index, air: Ai
defer liveness.deinit(gpa);
if (build_options.enable_debug_extensions and comp.verbose_air) {
- const fqn = try decl.fullyQualifiedName(zcu);
+ const fqn = try decl.fullyQualifiedName(pt);
std.debug.print("# Begin Function AIR: {}:\n", .{fqn.fmt(ip)});
@import("../print_air.zig").dump(pt, air, liveness);
std.debug.print("# End Function AIR: {}\n\n", .{fqn.fmt(ip)});
@@ -324,7 +729,7 @@ pub fn linkerUpdateFunc(pt: Zcu.PerThread, func_index: InternPool.Index, air: Ai
};
}
- const codegen_prog_node = zcu.codegen_prog_node.start((try decl.fullyQualifiedName(zcu)).toSlice(ip), 0);
+ const codegen_prog_node = zcu.codegen_prog_node.start((try decl.fullyQualifiedName(pt)).toSlice(ip), 0);
defer codegen_prog_node.end();
if (!air.typesFullyResolved(zcu)) {
@@ -434,7 +839,7 @@ fn getFileRootStruct(
decl.owns_tv = true;
decl.analysis = .complete;
- try zcu.scanNamespace(namespace_index, decls, decl);
+ try pt.scanNamespace(namespace_index, decls, decl);
try zcu.comp.work_queue.writeItem(.{ .resolve_type_fully = wip_ty.index });
return wip_ty.finish(ip, decl_index, namespace_index.toOptional());
}
@@ -502,7 +907,7 @@ fn semaFileUpdate(pt: Zcu.PerThread, file_index: Zcu.File.Index, type_outdated:
const decls = file.zir.bodySlice(extra_index, decls_len);
if (!type_outdated) {
- try zcu.scanNamespace(decl.src_namespace, decls, decl);
+ try pt.scanNamespace(decl.src_namespace, decls, decl);
}
return false;
@@ -539,7 +944,7 @@ fn semaFile(pt: Zcu.PerThread, file_index: Zcu.File.Index) Zcu.SemaError!void {
zcu.setFileRootDecl(file_index, new_decl_index.toOptional());
zcu.namespacePtr(new_namespace_index).decl_index = new_decl_index;
- new_decl.name = try file.fullyQualifiedName(zcu);
+ new_decl.name = try file.fullyQualifiedName(pt);
new_decl.name_fully_qualified = true;
new_decl.is_pub = true;
new_decl.is_exported = false;
@@ -601,9 +1006,9 @@ fn semaDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) !Zcu.SemaDeclResult {
}
log.debug("semaDecl '{d}'", .{@intFromEnum(decl_index)});
- log.debug("decl name '{}'", .{(try decl.fullyQualifiedName(zcu)).fmt(ip)});
+ log.debug("decl name '{}'", .{(try decl.fullyQualifiedName(pt)).fmt(ip)});
defer blk: {
- log.debug("finish decl name '{}'", .{(decl.fullyQualifiedName(zcu) catch break :blk).fmt(ip)});
+ log.debug("finish decl name '{}'", .{(decl.fullyQualifiedName(pt) catch break :blk).fmt(ip)});
}
const old_has_tv = decl.has_tv;
@@ -631,7 +1036,7 @@ fn semaDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) !Zcu.SemaDeclResult {
const std_file_root_decl_index = zcu.fileRootDecl(std_file_imported.file_index);
const std_decl = zcu.declPtr(std_file_root_decl_index.unwrap().?);
const std_namespace = std_decl.getInnerNamespace(zcu).?;
- const builtin_str = try ip.getOrPutString(gpa, "builtin", .no_embedded_nulls);
+ const builtin_str = try ip.getOrPutString(gpa, pt.tid, "builtin", .no_embedded_nulls);
const builtin_decl = zcu.declPtr(std_namespace.decls.getKeyAdapted(builtin_str, Zcu.DeclAdapter{ .zcu = zcu }) orelse break :ip_index .none);
const builtin_namespace = builtin_decl.getInnerNamespaceIndex(zcu).unwrap() orelse break :ip_index .none;
if (decl.src_namespace != builtin_namespace) break :ip_index .none;
@@ -802,7 +1207,7 @@ fn semaDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) !Zcu.SemaDeclResult {
} else if (bytes.len == 0) {
return sema.fail(&block_scope, section_src, "linksection cannot be empty", .{});
}
- break :blk try ip.getOrPutStringOpt(gpa, bytes, .no_embedded_nulls);
+ break :blk try ip.getOrPutStringOpt(gpa, pt.tid, bytes, .no_embedded_nulls);
};
decl.@"addrspace" = blk: {
const addrspace_ctx: Sema.AddressSpaceContext = switch (ip.indexToKey(decl_val.toIntern())) {
@@ -996,7 +1401,7 @@ fn newEmbedFile(
} });
const array_val = try pt.intern(.{ .aggregate = .{
.ty = array_ty,
- .storage = .{ .bytes = try ip.getOrPutTrailingString(gpa, bytes.len, .maybe_embedded_nulls) },
+ .storage = .{ .bytes = try ip.getOrPutTrailingString(gpa, pt.tid, bytes.len, .maybe_embedded_nulls) },
} });
const ptr_ty = (try pt.ptrType(.{
@@ -1018,7 +1423,7 @@ fn newEmbedFile(
result.* = new_file;
new_file.* = .{
- .sub_file_path = try ip.getOrPutString(gpa, sub_file_path, .no_embedded_nulls),
+ .sub_file_path = try ip.getOrPutString(gpa, pt.tid, sub_file_path, .no_embedded_nulls),
.owner = pkg,
.stat = stat,
.val = ptr_val,
@@ -1027,6 +1432,271 @@ fn newEmbedFile(
return ptr_val;
}
+pub fn scanNamespace(
+ pt: Zcu.PerThread,
+ namespace_index: Zcu.Namespace.Index,
+ decls: []const Zir.Inst.Index,
+ parent_decl: *Zcu.Decl,
+) Allocator.Error!void {
+ const tracy = trace(@src());
+ defer tracy.end();
+
+ const zcu = pt.zcu;
+ const gpa = zcu.gpa;
+ const namespace = zcu.namespacePtr(namespace_index);
+
+ // For incremental updates, `scanDecl` wants to look up existing decls by their ZIR index rather
+ // than their name. We'll build an efficient mapping now, then discard the current `decls`.
+ var existing_by_inst: std.AutoHashMapUnmanaged(InternPool.TrackedInst.Index, Zcu.Decl.Index) = .{};
+ defer existing_by_inst.deinit(gpa);
+
+ try existing_by_inst.ensureTotalCapacity(gpa, @intCast(namespace.decls.count()));
+
+ for (namespace.decls.keys()) |decl_index| {
+ const decl = zcu.declPtr(decl_index);
+ existing_by_inst.putAssumeCapacityNoClobber(decl.zir_decl_index.unwrap().?, decl_index);
+ }
+
+ var seen_decls: std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, void) = .{};
+ defer seen_decls.deinit(gpa);
+
+ try zcu.comp.work_queue.ensureUnusedCapacity(decls.len);
+
+ namespace.decls.clearRetainingCapacity();
+ try namespace.decls.ensureTotalCapacity(gpa, decls.len);
+
+ namespace.usingnamespace_set.clearRetainingCapacity();
+
+ var scan_decl_iter: ScanDeclIter = .{
+ .pt = pt,
+ .namespace_index = namespace_index,
+ .parent_decl = parent_decl,
+ .seen_decls = &seen_decls,
+ .existing_by_inst = &existing_by_inst,
+ .pass = .named,
+ };
+ for (decls) |decl_inst| {
+ try scan_decl_iter.scanDecl(decl_inst);
+ }
+ scan_decl_iter.pass = .unnamed;
+ for (decls) |decl_inst| {
+ try scan_decl_iter.scanDecl(decl_inst);
+ }
+
+ if (seen_decls.count() != namespace.decls.count()) {
+ // Do a pass over the namespace contents and remove any decls from the last update
+ // which were removed in this one.
+ var i: usize = 0;
+ while (i < namespace.decls.count()) {
+ const decl_index = namespace.decls.keys()[i];
+ const decl = zcu.declPtr(decl_index);
+ if (!seen_decls.contains(decl.name)) {
+ // We must preserve namespace ordering for @typeInfo.
+                namespace.decls.orderedRemoveAt(i);
+            } else {
+                i += 1;
+            }
+ }
+ }
+}
+
+const ScanDeclIter = struct {
+ pt: Zcu.PerThread,
+ namespace_index: Zcu.Namespace.Index,
+ parent_decl: *Zcu.Decl,
+ seen_decls: *std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, void),
+ existing_by_inst: *const std.AutoHashMapUnmanaged(InternPool.TrackedInst.Index, Zcu.Decl.Index),
+ /// Decl scanning is run in two passes, so that we can detect when a generated
+ /// name would clash with an explicit name and use a different one.
+ pass: enum { named, unnamed },
+ usingnamespace_index: usize = 0,
+ comptime_index: usize = 0,
+ unnamed_test_index: usize = 0,
+
+ fn avoidNameConflict(iter: *ScanDeclIter, comptime fmt: []const u8, args: anytype) !InternPool.NullTerminatedString {
+ const pt = iter.pt;
+ const gpa = pt.zcu.gpa;
+ const ip = &pt.zcu.intern_pool;
+ var name = try ip.getOrPutStringFmt(gpa, pt.tid, fmt, args, .no_embedded_nulls);
+ var gop = try iter.seen_decls.getOrPut(gpa, name);
+ var next_suffix: u32 = 0;
+ while (gop.found_existing) {
+ name = try ip.getOrPutStringFmt(gpa, pt.tid, "{}_{d}", .{ name.fmt(ip), next_suffix }, .no_embedded_nulls);
+ gop = try iter.seen_decls.getOrPut(gpa, name);
+ next_suffix += 1;
+ }
+ return name;
+ }
+
+ fn scanDecl(iter: *ScanDeclIter, decl_inst: Zir.Inst.Index) Allocator.Error!void {
+ const tracy = trace(@src());
+ defer tracy.end();
+
+ const pt = iter.pt;
+ const zcu = pt.zcu;
+ const namespace_index = iter.namespace_index;
+ const namespace = zcu.namespacePtr(namespace_index);
+ const gpa = zcu.gpa;
+ const zir = namespace.fileScope(zcu).zir;
+ const ip = &zcu.intern_pool;
+
+ const inst_data = zir.instructions.items(.data)[@intFromEnum(decl_inst)].declaration;
+ const extra = zir.extraData(Zir.Inst.Declaration, inst_data.payload_index);
+ const declaration = extra.data;
+
+ // Every Decl needs a name.
+ const decl_name: InternPool.NullTerminatedString, const kind: Zcu.Decl.Kind, const is_named_test: bool = switch (declaration.name) {
+ .@"comptime" => info: {
+ if (iter.pass != .unnamed) return;
+ const i = iter.comptime_index;
+ iter.comptime_index += 1;
+ break :info .{
+ try iter.avoidNameConflict("comptime_{d}", .{i}),
+ .@"comptime",
+ false,
+ };
+ },
+ .@"usingnamespace" => info: {
+ // TODO: this isn't right! These should be considered unnamed. Name conflicts can happen here.
+ // The problem is, we need to preserve the decl ordering for `@typeInfo`.
+ // I'm not bothering to fix this now, since some upcoming changes will change this code significantly anyway.
+ if (iter.pass != .named) return;
+ const i = iter.usingnamespace_index;
+ iter.usingnamespace_index += 1;
+ break :info .{
+ try iter.avoidNameConflict("usingnamespace_{d}", .{i}),
+ .@"usingnamespace",
+ false,
+ };
+ },
+ .unnamed_test => info: {
+ if (iter.pass != .unnamed) return;
+ const i = iter.unnamed_test_index;
+ iter.unnamed_test_index += 1;
+ break :info .{
+ try iter.avoidNameConflict("test_{d}", .{i}),
+ .@"test",
+ false,
+ };
+ },
+ .decltest => info: {
+ // We consider these to be unnamed since the decl name can be adjusted to avoid conflicts if necessary.
+ if (iter.pass != .unnamed) return;
+ assert(declaration.flags.has_doc_comment);
+ const name = zir.nullTerminatedString(@enumFromInt(zir.extra[extra.end]));
+ break :info .{
+ try iter.avoidNameConflict("decltest.{s}", .{name}),
+ .@"test",
+ true,
+ };
+ },
+ _ => if (declaration.name.isNamedTest(zir)) info: {
+ // We consider these to be unnamed since the decl name can be adjusted to avoid conflicts if necessary.
+ if (iter.pass != .unnamed) return;
+ break :info .{
+ try iter.avoidNameConflict("test.{s}", .{zir.nullTerminatedString(declaration.name.toString(zir).?)}),
+ .@"test",
+ true,
+ };
+ } else info: {
+ if (iter.pass != .named) return;
+ const name = try ip.getOrPutString(
+ gpa,
+ pt.tid,
+ zir.nullTerminatedString(declaration.name.toString(zir).?),
+ .no_embedded_nulls,
+ );
+ try iter.seen_decls.putNoClobber(gpa, name, {});
+ break :info .{
+ name,
+ .named,
+ false,
+ };
+ },
+ };
+
+ switch (kind) {
+ .@"usingnamespace" => try namespace.usingnamespace_set.ensureUnusedCapacity(gpa, 1),
+ .@"test" => try zcu.test_functions.ensureUnusedCapacity(gpa, 1),
+ else => {},
+ }
+
+ const parent_file_scope_index = iter.parent_decl.getFileScopeIndex(zcu);
+ const tracked_inst = try ip.trackZir(gpa, parent_file_scope_index, decl_inst);
+
+ // We create a Decl for it regardless of analysis status.
+
+ const prev_exported, const decl_index = if (iter.existing_by_inst.get(tracked_inst)) |decl_index| decl_index: {
+ // We need only update this existing Decl.
+ const decl = zcu.declPtr(decl_index);
+ const was_exported = decl.is_exported;
+ assert(decl.kind == kind); // ZIR tracking should preserve this
+ decl.name = decl_name;
+ decl.is_pub = declaration.flags.is_pub;
+ decl.is_exported = declaration.flags.is_export;
+ break :decl_index .{ was_exported, decl_index };
+ } else decl_index: {
+ // Create and set up a new Decl.
+ const new_decl_index = try zcu.allocateNewDecl(namespace_index);
+ const new_decl = zcu.declPtr(new_decl_index);
+ new_decl.kind = kind;
+ new_decl.name = decl_name;
+ new_decl.is_pub = declaration.flags.is_pub;
+ new_decl.is_exported = declaration.flags.is_export;
+ new_decl.zir_decl_index = tracked_inst.toOptional();
+ break :decl_index .{ false, new_decl_index };
+ };
+
+ const decl = zcu.declPtr(decl_index);
+
+ namespace.decls.putAssumeCapacityNoClobberContext(decl_index, {}, .{ .zcu = zcu });
+
+ const comp = zcu.comp;
+ const decl_mod = namespace.fileScope(zcu).mod;
+ const want_analysis = declaration.flags.is_export or switch (kind) {
+ .anon => unreachable,
+ .@"comptime" => true,
+ .@"usingnamespace" => a: {
+ namespace.usingnamespace_set.putAssumeCapacityNoClobber(decl_index, declaration.flags.is_pub);
+ break :a true;
+ },
+ .named => false,
+ .@"test" => a: {
+ if (!comp.config.is_test) break :a false;
+ if (decl_mod != zcu.main_mod) break :a false;
+ if (is_named_test and comp.test_filters.len > 0) {
+ const decl_fqn = try namespace.fullyQualifiedName(pt, decl_name);
+ const decl_fqn_slice = decl_fqn.toSlice(ip);
+ for (comp.test_filters) |test_filter| {
+ if (std.mem.indexOf(u8, decl_fqn_slice, test_filter)) |_| break;
+ } else break :a false;
+ }
+ zcu.test_functions.putAssumeCapacity(decl_index, {}); // may clobber on incremental update
+ break :a true;
+ },
+ };
+
+ if (want_analysis) {
+ // We will not queue analysis if the decl has been analyzed on a previous update and
+ // `is_export` is unchanged. In this case, the incremental update mechanism will handle
+ // re-analysis for us if necessary.
+ if (prev_exported != declaration.flags.is_export or decl.analysis == .unreferenced) {
+ log.debug("scanDecl queue analyze_decl file='{s}' decl_name='{}' decl_index={d}", .{
+ namespace.fileScope(zcu).sub_file_path, decl_name.fmt(ip), decl_index,
+ });
+ comp.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = decl_index });
+ }
+ }
+
+ if (decl.getOwnedFunction(zcu) != null) {
+ // TODO this logic is insufficient; namespaces we don't re-scan may still require
+ // updated line numbers. Look into this!
+ // TODO Look into detecting when this would be unnecessary by storing enough state
+ // in `Decl` to notice that the line number did not change.
+ comp.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl_index });
+ }
+ }
+};
+
pub fn analyzeFnBody(pt: Zcu.PerThread, func_index: InternPool.Index, arena: Allocator) Zcu.SemaError!Air {
const tracy = trace(@src());
defer tracy.end();
@@ -1038,12 +1708,12 @@ pub fn analyzeFnBody(pt: Zcu.PerThread, func_index: InternPool.Index, arena: All
const decl_index = func.owner_decl;
const decl = mod.declPtr(decl_index);
- log.debug("func name '{}'", .{(try decl.fullyQualifiedName(mod)).fmt(ip)});
+ log.debug("func name '{}'", .{(try decl.fullyQualifiedName(pt)).fmt(ip)});
defer blk: {
- log.debug("finish func name '{}'", .{(decl.fullyQualifiedName(mod) catch break :blk).fmt(ip)});
+ log.debug("finish func name '{}'", .{(decl.fullyQualifiedName(pt) catch break :blk).fmt(ip)});
}
- const decl_prog_node = mod.sema_prog_node.start((try decl.fullyQualifiedName(mod)).toSlice(ip), 0);
+ const decl_prog_node = mod.sema_prog_node.start((try decl.fullyQualifiedName(pt)).toSlice(ip), 0);
defer decl_prog_node.end();
mod.intern_pool.removeDependenciesForDepender(gpa, InternPool.AnalUnit.wrap(.{ .func = func_index }));
@@ -1273,6 +1943,19 @@ pub fn analyzeFnBody(pt: Zcu.PerThread, func_index: InternPool.Index, arena: All
};
}
+fn lockAndClearFileCompileError(pt: Zcu.PerThread, file: *Zcu.File) void {
+ switch (file.status) {
+ .success_zir, .retryable_failure => {},
+ .never_loaded, .parse_failure, .astgen_failure => {
+ pt.zcu.comp.mutex.lock();
+ defer pt.zcu.comp.mutex.unlock();
+ if (pt.zcu.failed_files.fetchSwapRemove(file)) |kv| {
+ if (kv.value) |msg| msg.destroy(pt.zcu.gpa); // Delete previous error message.
+ }
+ },
+ }
+}
+
/// Called from `Compilation.update`, after everything is done, just before
/// reporting compile errors. In this function we emit exported symbol collision
/// errors and communicate exported symbols to the linker backend.
@@ -1397,7 +2080,7 @@ pub fn populateTestFunctions(
const root_decl_index = zcu.fileRootDecl(builtin_file_index);
const root_decl = zcu.declPtr(root_decl_index.unwrap().?);
const builtin_namespace = zcu.namespacePtr(root_decl.src_namespace);
- const test_functions_str = try ip.getOrPutString(gpa, "test_functions", .no_embedded_nulls);
+ const test_functions_str = try ip.getOrPutString(gpa, pt.tid, "test_functions", .no_embedded_nulls);
const decl_index = builtin_namespace.decls.getKeyAdapted(
test_functions_str,
Zcu.DeclAdapter{ .zcu = zcu },
@@ -1424,7 +2107,7 @@ pub fn populateTestFunctions(
for (test_fn_vals, zcu.test_functions.keys()) |*test_fn_val, test_decl_index| {
const test_decl = zcu.declPtr(test_decl_index);
- const test_decl_name = try test_decl.fullyQualifiedName(zcu);
+ const test_decl_name = try test_decl.fullyQualifiedName(pt);
const test_decl_name_len = test_decl_name.length(ip);
const test_name_anon_decl: InternPool.Key.Ptr.BaseAddr.AnonDecl = n: {
const test_name_ty = try pt.arrayType(.{
@@ -1530,7 +2213,7 @@ pub fn linkerUpdateDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) !void {
const decl = zcu.declPtr(decl_index);
- const codegen_prog_node = zcu.codegen_prog_node.start((try decl.fullyQualifiedName(zcu)).toSlice(&zcu.intern_pool), 0);
+ const codegen_prog_node = zcu.codegen_prog_node.start((try decl.fullyQualifiedName(pt)).toSlice(&zcu.intern_pool), 0);
defer codegen_prog_node.end();
if (comp.bin_file) |lf| {
@@ -2064,11 +2747,11 @@ pub fn getBuiltinDecl(pt: Zcu.PerThread, name: []const u8) Allocator.Error!Inter
const std_file_imported = zcu.importPkg(zcu.std_mod) catch @panic("failed to import lib/std.zig");
const std_file_root_decl = zcu.fileRootDecl(std_file_imported.file_index).unwrap().?;
const std_namespace = zcu.declPtr(std_file_root_decl).getOwnedInnerNamespace(zcu).?;
- const builtin_str = try ip.getOrPutString(gpa, "builtin", .no_embedded_nulls);
+ const builtin_str = try ip.getOrPutString(gpa, pt.tid, "builtin", .no_embedded_nulls);
const builtin_decl = std_namespace.decls.getKeyAdapted(builtin_str, Zcu.DeclAdapter{ .zcu = zcu }) orelse @panic("lib/std.zig is corrupt and missing 'builtin'");
pt.ensureDeclAnalyzed(builtin_decl) catch @panic("std.builtin is corrupt");
const builtin_namespace = zcu.declPtr(builtin_decl).getInnerNamespace(zcu) orelse @panic("std.builtin is corrupt");
- const name_str = try ip.getOrPutString(gpa, name, .no_embedded_nulls);
+ const name_str = try ip.getOrPutString(gpa, pt.tid, name, .no_embedded_nulls);
return builtin_namespace.decls.getKeyAdapted(name_str, Zcu.DeclAdapter{ .zcu = zcu }) orelse @panic("lib/std/builtin.zig is corrupt");
}
@@ -2082,6 +2765,8 @@ pub fn getBuiltinType(pt: Zcu.PerThread, name: []const u8) Allocator.Error!Type
const Air = @import("../Air.zig");
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
+const Ast = std.zig.Ast;
+const AstGen = std.zig.AstGen;
const BigIntConst = std.math.big.int.Const;
const BigIntMutable = std.math.big.int.Mutable;
const build_options = @import("build_options");