|           |                                                       |                            |
|-----------|-------------------------------------------------------|----------------------------|
| author    | Jacob Young <jacobly0@users.noreply.github.com>       | 2024-06-15 19:57:47 -0400  |
| committer | Jacob Young <jacobly0@users.noreply.github.com>       | 2024-07-07 22:59:52 -0400  |
| commit    | ca02266157ee72e41068672c8ca6f928fcbf6fdf (patch)      |                            |
| tree      | d827ad6e5d0d311c4fca7fa83a32a98d3d201ac4 /src/Zcu.zig |                            |
| parent    | 525f341f33af9b8aad53931fd5511f00a82cb090 (diff)       |                            |
| download  | zig-ca02266157ee72e41068672c8ca6f928fcbf6fdf.tar.gz   | zig-ca02266157ee72e41068672c8ca6f928fcbf6fdf.zip |
Zcu: pass `PerThread` to intern pool string functions
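
For orientation before the hunks below, here is a minimal sketch of what the signature change looks like from a caller's point of view. The helper name `declNameForLog` is hypothetical; the `Zcu.PerThread` parameter, the `pt.zcu` field, and the forwarding of `pt.tid` to `getOrPutTrailingString` are taken from the diff itself.

```zig
const Zcu = @import("Zcu.zig");
const InternPool = @import("InternPool.zig");

// Hypothetical helper illustrating the new calling convention: where a caller
// previously passed `zcu: *Zcu` and wrote `decl.fullyQualifiedName(zcu)`, it
// now passes a per-thread handle. The string helpers reach the compilation
// unit through `pt.zcu` and forward `pt.tid` to
// `InternPool.getOrPutTrailingString`, so interning is keyed by the calling
// thread.
fn declNameForLog(decl: Zcu.Decl, pt: Zcu.PerThread) !InternPool.NullTerminatedString {
    return decl.fullyQualifiedName(pt);
}
```

The same pattern applies to `Namespace.fullyQualifiedName` and `File.fullyQualifiedName` in the hunks that follow.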
Diffstat (limited to 'src/Zcu.zig')
| mode       | file        | lines changed |
|------------|-------------|---------------|
| -rw-r--r-- | src/Zcu.zig | 687           |
1 file changed, 13 insertions(+), 674 deletions(-)
diff --git a/src/Zcu.zig b/src/Zcu.zig index bfc70815df..c4ebc6a36b 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -420,11 +420,11 @@ pub const Decl = struct { return zcu.namespacePtr(decl.src_namespace).renderFullyQualifiedDebugName(zcu, decl.name, writer); } - pub fn fullyQualifiedName(decl: Decl, zcu: *Zcu) !InternPool.NullTerminatedString { + pub fn fullyQualifiedName(decl: Decl, pt: Zcu.PerThread) !InternPool.NullTerminatedString { return if (decl.name_fully_qualified) decl.name else - zcu.namespacePtr(decl.src_namespace).fullyQualifiedName(zcu, decl.name); + pt.zcu.namespacePtr(decl.src_namespace).fullyQualifiedName(pt, decl.name); } pub fn typeOf(decl: Decl, zcu: *const Zcu) Type { @@ -688,9 +688,10 @@ pub const Namespace = struct { pub fn fullyQualifiedName( ns: Namespace, - zcu: *Zcu, + pt: Zcu.PerThread, name: InternPool.NullTerminatedString, ) !InternPool.NullTerminatedString { + const zcu = pt.zcu; const ip = &zcu.intern_pool; const count = count: { var count: usize = name.length(ip) + 1; @@ -723,7 +724,7 @@ pub const Namespace = struct { }; } - return ip.getOrPutTrailingString(gpa, ip.string_bytes.items.len - start, .no_embedded_nulls); + return ip.getOrPutTrailingString(gpa, pt.tid, ip.string_bytes.items.len - start, .no_embedded_nulls); } pub fn getType(ns: Namespace, zcu: *Zcu) Type { @@ -875,11 +876,12 @@ pub const File = struct { }; } - pub fn fullyQualifiedName(file: File, mod: *Module) !InternPool.NullTerminatedString { - const ip = &mod.intern_pool; + pub fn fullyQualifiedName(file: File, pt: Zcu.PerThread) !InternPool.NullTerminatedString { + const gpa = pt.zcu.gpa; + const ip = &pt.zcu.intern_pool; const start = ip.string_bytes.items.len; - try file.renderFullyQualifiedName(ip.string_bytes.writer(mod.gpa)); - return ip.getOrPutTrailingString(mod.gpa, ip.string_bytes.items.len - start, .no_embedded_nulls); + try file.renderFullyQualifiedName(ip.string_bytes.writer(gpa)); + return ip.getOrPutTrailingString(gpa, pt.tid, ip.string_bytes.items.len - start, .no_embedded_nulls); } pub fn fullPath(file: File, ally: Allocator) ![]u8 { @@ -2569,8 +2571,8 @@ pub fn declIsRoot(mod: *Module, decl_index: Decl.Index) bool { } // TODO https://github.com/ziglang/zig/issues/8643 -const data_has_safety_tag = @sizeOf(Zir.Inst.Data) != 8; -const HackDataLayout = extern struct { +pub const data_has_safety_tag = @sizeOf(Zir.Inst.Data) != 8; +pub const HackDataLayout = extern struct { data: [8]u8 align(@alignOf(Zir.Inst.Data)), safety_tag: u8, }; @@ -2580,291 +2582,11 @@ comptime { } } -pub fn astGenFile( - zcu: *Zcu, - file: *File, - /// This parameter is provided separately from `file` because it is not - /// safe to access `import_table` without a lock, and this index is needed - /// in the call to `updateZirRefs`. - file_index: File.Index, - path_digest: Cache.BinDigest, - opt_root_decl: Zcu.Decl.OptionalIndex, -) !void { - assert(!file.mod.isBuiltin()); - - const tracy = trace(@src()); - defer tracy.end(); - - const comp = zcu.comp; - const gpa = zcu.gpa; - - // In any case we need to examine the stat of the file to determine the course of action. 
- var source_file = try file.mod.root.openFile(file.sub_file_path, .{}); - defer source_file.close(); - - const stat = try source_file.stat(); - - const want_local_cache = file.mod == zcu.main_mod; - const hex_digest = Cache.binToHex(path_digest); - const cache_directory = if (want_local_cache) zcu.local_zir_cache else zcu.global_zir_cache; - const zir_dir = cache_directory.handle; - - // Determine whether we need to reload the file from disk and redo parsing and AstGen. - var lock: std.fs.File.Lock = switch (file.status) { - .never_loaded, .retryable_failure => lock: { - // First, load the cached ZIR code, if any. - log.debug("AstGen checking cache: {s} (local={}, digest={s})", .{ - file.sub_file_path, want_local_cache, &hex_digest, - }); - - break :lock .shared; - }, - .parse_failure, .astgen_failure, .success_zir => lock: { - const unchanged_metadata = - stat.size == file.stat.size and - stat.mtime == file.stat.mtime and - stat.inode == file.stat.inode; - - if (unchanged_metadata) { - log.debug("unmodified metadata of file: {s}", .{file.sub_file_path}); - return; - } - - log.debug("metadata changed: {s}", .{file.sub_file_path}); - - break :lock .exclusive; - }, - }; - - // We ask for a lock in order to coordinate with other zig processes. - // If another process is already working on this file, we will get the cached - // version. Likewise if we're working on AstGen and another process asks for - // the cached file, they'll get it. - const cache_file = while (true) { - break zir_dir.createFile(&hex_digest, .{ - .read = true, - .truncate = false, - .lock = lock, - }) catch |err| switch (err) { - error.NotDir => unreachable, // no dir components - error.InvalidUtf8 => unreachable, // it's a hex encoded name - error.InvalidWtf8 => unreachable, // it's a hex encoded name - error.BadPathName => unreachable, // it's a hex encoded name - error.NameTooLong => unreachable, // it's a fixed size name - error.PipeBusy => unreachable, // it's not a pipe - error.WouldBlock => unreachable, // not asking for non-blocking I/O - // There are no dir components, so you would think that this was - // unreachable, however we have observed on macOS two processes racing - // to do openat() with O_CREAT manifest in ENOENT. - error.FileNotFound => continue, - - else => |e| return e, // Retryable errors are handled at callsite. - }; - }; - defer cache_file.close(); - - while (true) { - update: { - // First we read the header to determine the lengths of arrays. - const header = cache_file.reader().readStruct(Zir.Header) catch |err| switch (err) { - // This can happen if Zig bails out of this function between creating - // the cached file and writing it. 
- error.EndOfStream => break :update, - else => |e| return e, - }; - const unchanged_metadata = - stat.size == header.stat_size and - stat.mtime == header.stat_mtime and - stat.inode == header.stat_inode; - - if (!unchanged_metadata) { - log.debug("AstGen cache stale: {s}", .{file.sub_file_path}); - break :update; - } - log.debug("AstGen cache hit: {s} instructions_len={d}", .{ - file.sub_file_path, header.instructions_len, - }); - - file.zir = loadZirCacheBody(gpa, header, cache_file) catch |err| switch (err) { - error.UnexpectedFileSize => { - log.warn("unexpected EOF reading cached ZIR for {s}", .{file.sub_file_path}); - break :update; - }, - else => |e| return e, - }; - file.zir_loaded = true; - file.stat = .{ - .size = header.stat_size, - .inode = header.stat_inode, - .mtime = header.stat_mtime, - }; - file.status = .success_zir; - log.debug("AstGen cached success: {s}", .{file.sub_file_path}); - - // TODO don't report compile errors until Sema @importFile - if (file.zir.hasCompileErrors()) { - { - comp.mutex.lock(); - defer comp.mutex.unlock(); - try zcu.failed_files.putNoClobber(gpa, file, null); - } - file.status = .astgen_failure; - return error.AnalysisFail; - } - return; - } - - // If we already have the exclusive lock then it is our job to update. - if (builtin.os.tag == .wasi or lock == .exclusive) break; - // Otherwise, unlock to give someone a chance to get the exclusive lock - // and then upgrade to an exclusive lock. - cache_file.unlock(); - lock = .exclusive; - try cache_file.lock(lock); - } - - // The cache is definitely stale so delete the contents to avoid an underwrite later. - cache_file.setEndPos(0) catch |err| switch (err) { - error.FileTooBig => unreachable, // 0 is not too big - - else => |e| return e, - }; - - zcu.lockAndClearFileCompileError(file); - - // If the previous ZIR does not have compile errors, keep it around - // in case parsing or new ZIR fails. In case of successful ZIR update - // at the end of this function we will free it. - // We keep the previous ZIR loaded so that we can use it - // for the update next time it does not have any compile errors. This avoids - // needlessly tossing out semantic analysis work when an error is - // temporarily introduced. - if (file.zir_loaded and !file.zir.hasCompileErrors()) { - assert(file.prev_zir == null); - const prev_zir_ptr = try gpa.create(Zir); - file.prev_zir = prev_zir_ptr; - prev_zir_ptr.* = file.zir; - file.zir = undefined; - file.zir_loaded = false; - } - file.unload(gpa); - - if (stat.size > std.math.maxInt(u32)) - return error.FileTooBig; - - const source = try gpa.allocSentinel(u8, @as(usize, @intCast(stat.size)), 0); - defer if (!file.source_loaded) gpa.free(source); - const amt = try source_file.readAll(source); - if (amt != stat.size) - return error.UnexpectedEndOfFile; - - file.stat = .{ - .size = stat.size, - .inode = stat.inode, - .mtime = stat.mtime, - }; - file.source = source; - file.source_loaded = true; - - file.tree = try Ast.parse(gpa, source, .zig); - file.tree_loaded = true; - - // Any potential AST errors are converted to ZIR errors here. 
- file.zir = try AstGen.generate(gpa, file.tree); - file.zir_loaded = true; - file.status = .success_zir; - log.debug("AstGen fresh success: {s}", .{file.sub_file_path}); - - const safety_buffer = if (data_has_safety_tag) - try gpa.alloc([8]u8, file.zir.instructions.len) - else - undefined; - defer if (data_has_safety_tag) gpa.free(safety_buffer); - const data_ptr = if (data_has_safety_tag) - if (file.zir.instructions.len == 0) - @as([*]const u8, undefined) - else - @as([*]const u8, @ptrCast(safety_buffer.ptr)) - else - @as([*]const u8, @ptrCast(file.zir.instructions.items(.data).ptr)); - if (data_has_safety_tag) { - // The `Data` union has a safety tag but in the file format we store it without. - for (file.zir.instructions.items(.data), 0..) |*data, i| { - const as_struct = @as(*const HackDataLayout, @ptrCast(data)); - safety_buffer[i] = as_struct.data; - } - } - - const header: Zir.Header = .{ - .instructions_len = @as(u32, @intCast(file.zir.instructions.len)), - .string_bytes_len = @as(u32, @intCast(file.zir.string_bytes.len)), - .extra_len = @as(u32, @intCast(file.zir.extra.len)), - - .stat_size = stat.size, - .stat_inode = stat.inode, - .stat_mtime = stat.mtime, - }; - var iovecs = [_]std.posix.iovec_const{ - .{ - .base = @as([*]const u8, @ptrCast(&header)), - .len = @sizeOf(Zir.Header), - }, - .{ - .base = @as([*]const u8, @ptrCast(file.zir.instructions.items(.tag).ptr)), - .len = file.zir.instructions.len, - }, - .{ - .base = data_ptr, - .len = file.zir.instructions.len * 8, - }, - .{ - .base = file.zir.string_bytes.ptr, - .len = file.zir.string_bytes.len, - }, - .{ - .base = @as([*]const u8, @ptrCast(file.zir.extra.ptr)), - .len = file.zir.extra.len * 4, - }, - }; - cache_file.writevAll(&iovecs) catch |err| { - log.warn("unable to write cached ZIR code for {}{s} to {}{s}: {s}", .{ - file.mod.root, file.sub_file_path, cache_directory, &hex_digest, @errorName(err), - }); - }; - - if (file.zir.hasCompileErrors()) { - { - comp.mutex.lock(); - defer comp.mutex.unlock(); - try zcu.failed_files.putNoClobber(gpa, file, null); - } - file.status = .astgen_failure; - return error.AnalysisFail; - } - - if (file.prev_zir) |prev_zir| { - try updateZirRefs(zcu, file, file_index, prev_zir.*); - // No need to keep previous ZIR. - prev_zir.deinit(gpa); - gpa.destroy(prev_zir); - file.prev_zir = null; - } - - if (opt_root_decl.unwrap()) |root_decl| { - // The root of this file must be re-analyzed, since the file has changed. - comp.mutex.lock(); - defer comp.mutex.unlock(); - - log.debug("outdated root Decl: {}", .{root_decl}); - try zcu.outdated_file_root.put(gpa, root_decl, {}); - } -} - pub fn loadZirCache(gpa: Allocator, cache_file: std.fs.File) !Zir { return loadZirCacheBody(gpa, try cache_file.reader().readStruct(Zir.Header), cache_file); } -fn loadZirCacheBody(gpa: Allocator, header: Zir.Header, cache_file: std.fs.File) !Zir { +pub fn loadZirCacheBody(gpa: Allocator, header: Zir.Header, cache_file: std.fs.File) !Zir { var instructions: std.MultiArrayList(Zir.Inst) = .{}; errdefer instructions.deinit(gpa); @@ -2930,127 +2652,6 @@ fn loadZirCacheBody(gpa: Allocator, header: Zir.Header, cache_file: std.fs.File) return zir; } -/// This is called from the AstGen thread pool, so must acquire -/// the Compilation mutex when acting on shared state. 
-fn updateZirRefs(zcu: *Module, file: *File, file_index: File.Index, old_zir: Zir) !void { - const gpa = zcu.gpa; - const new_zir = file.zir; - - var inst_map: std.AutoHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index) = .{}; - defer inst_map.deinit(gpa); - - try mapOldZirToNew(gpa, old_zir, new_zir, &inst_map); - - const old_tag = old_zir.instructions.items(.tag); - const old_data = old_zir.instructions.items(.data); - - // TODO: this should be done after all AstGen workers complete, to avoid - // iterating over this full set for every updated file. - for (zcu.intern_pool.tracked_insts.keys(), 0..) |*ti, idx_raw| { - const ti_idx: InternPool.TrackedInst.Index = @enumFromInt(idx_raw); - if (ti.file != file_index) continue; - const old_inst = ti.inst; - ti.inst = inst_map.get(ti.inst) orelse { - // Tracking failed for this instruction. Invalidate associated `src_hash` deps. - zcu.comp.mutex.lock(); - defer zcu.comp.mutex.unlock(); - log.debug("tracking failed for %{d}", .{old_inst}); - try zcu.markDependeeOutdated(.{ .src_hash = ti_idx }); - continue; - }; - - if (old_zir.getAssociatedSrcHash(old_inst)) |old_hash| hash_changed: { - if (new_zir.getAssociatedSrcHash(ti.inst)) |new_hash| { - if (std.zig.srcHashEql(old_hash, new_hash)) { - break :hash_changed; - } - log.debug("hash for (%{d} -> %{d}) changed: {} -> {}", .{ - old_inst, - ti.inst, - std.fmt.fmtSliceHexLower(&old_hash), - std.fmt.fmtSliceHexLower(&new_hash), - }); - } - // The source hash associated with this instruction changed - invalidate relevant dependencies. - zcu.comp.mutex.lock(); - defer zcu.comp.mutex.unlock(); - try zcu.markDependeeOutdated(.{ .src_hash = ti_idx }); - } - - // If this is a `struct_decl` etc, we must invalidate any outdated namespace dependencies. - const has_namespace = switch (old_tag[@intFromEnum(old_inst)]) { - .extended => switch (old_data[@intFromEnum(old_inst)].extended.opcode) { - .struct_decl, .union_decl, .opaque_decl, .enum_decl => true, - else => false, - }, - else => false, - }; - if (!has_namespace) continue; - - var old_names: std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void) = .{}; - defer old_names.deinit(zcu.gpa); - { - var it = old_zir.declIterator(old_inst); - while (it.next()) |decl_inst| { - const decl_name = old_zir.getDeclaration(decl_inst)[0].name; - switch (decl_name) { - .@"comptime", .@"usingnamespace", .unnamed_test, .decltest => continue, - _ => if (decl_name.isNamedTest(old_zir)) continue, - } - const name_zir = decl_name.toString(old_zir).?; - const name_ip = try zcu.intern_pool.getOrPutString( - zcu.gpa, - old_zir.nullTerminatedString(name_zir), - .no_embedded_nulls, - ); - try old_names.put(zcu.gpa, name_ip, {}); - } - } - var any_change = false; - { - var it = new_zir.declIterator(ti.inst); - while (it.next()) |decl_inst| { - const decl_name = old_zir.getDeclaration(decl_inst)[0].name; - switch (decl_name) { - .@"comptime", .@"usingnamespace", .unnamed_test, .decltest => continue, - _ => if (decl_name.isNamedTest(old_zir)) continue, - } - const name_zir = decl_name.toString(old_zir).?; - const name_ip = try zcu.intern_pool.getOrPutString( - zcu.gpa, - old_zir.nullTerminatedString(name_zir), - .no_embedded_nulls, - ); - if (!old_names.swapRemove(name_ip)) continue; - // Name added - any_change = true; - zcu.comp.mutex.lock(); - defer zcu.comp.mutex.unlock(); - try zcu.markDependeeOutdated(.{ .namespace_name = .{ - .namespace = ti_idx, - .name = name_ip, - } }); - } - } - // The only elements remaining in `old_names` now are any names which were removed. 
- for (old_names.keys()) |name_ip| { - any_change = true; - zcu.comp.mutex.lock(); - defer zcu.comp.mutex.unlock(); - try zcu.markDependeeOutdated(.{ .namespace_name = .{ - .namespace = ti_idx, - .name = name_ip, - } }); - } - - if (any_change) { - zcu.comp.mutex.lock(); - defer zcu.comp.mutex.unlock(); - try zcu.markDependeeOutdated(.{ .namespace = ti_idx }); - } - } -} - pub fn markDependeeOutdated(zcu: *Zcu, dependee: InternPool.Dependee) !void { log.debug("outdated dependee: {}", .{dependee}); var it = zcu.intern_pool.dependencyIterator(dependee); @@ -3695,268 +3296,6 @@ fn computePathDigest(zcu: *Zcu, mod: *Package.Module, sub_file_path: []const u8) return bin; } -pub fn scanNamespace( - zcu: *Zcu, - namespace_index: Namespace.Index, - decls: []const Zir.Inst.Index, - parent_decl: *Decl, -) Allocator.Error!void { - const tracy = trace(@src()); - defer tracy.end(); - - const gpa = zcu.gpa; - const namespace = zcu.namespacePtr(namespace_index); - - // For incremental updates, `scanDecl` wants to look up existing decls by their ZIR index rather - // than their name. We'll build an efficient mapping now, then discard the current `decls`. - var existing_by_inst: std.AutoHashMapUnmanaged(InternPool.TrackedInst.Index, Decl.Index) = .{}; - defer existing_by_inst.deinit(gpa); - - try existing_by_inst.ensureTotalCapacity(gpa, @intCast(namespace.decls.count())); - - for (namespace.decls.keys()) |decl_index| { - const decl = zcu.declPtr(decl_index); - existing_by_inst.putAssumeCapacityNoClobber(decl.zir_decl_index.unwrap().?, decl_index); - } - - var seen_decls: std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, void) = .{}; - defer seen_decls.deinit(gpa); - - try zcu.comp.work_queue.ensureUnusedCapacity(decls.len); - - namespace.decls.clearRetainingCapacity(); - try namespace.decls.ensureTotalCapacity(gpa, decls.len); - - namespace.usingnamespace_set.clearRetainingCapacity(); - - var scan_decl_iter: ScanDeclIter = .{ - .zcu = zcu, - .namespace_index = namespace_index, - .parent_decl = parent_decl, - .seen_decls = &seen_decls, - .existing_by_inst = &existing_by_inst, - .pass = .named, - }; - for (decls) |decl_inst| { - try scanDecl(&scan_decl_iter, decl_inst); - } - scan_decl_iter.pass = .unnamed; - for (decls) |decl_inst| { - try scanDecl(&scan_decl_iter, decl_inst); - } - - if (seen_decls.count() != namespace.decls.count()) { - // Do a pass over the namespace contents and remove any decls from the last update - // which were removed in this one. - var i: usize = 0; - while (i < namespace.decls.count()) { - const decl_index = namespace.decls.keys()[i]; - const decl = zcu.declPtr(decl_index); - if (!seen_decls.contains(decl.name)) { - // We must preserve namespace ordering for @typeInfo. - namespace.decls.orderedRemoveAt(i); - i -= 1; - } - } - } -} - -const ScanDeclIter = struct { - zcu: *Zcu, - namespace_index: Namespace.Index, - parent_decl: *Decl, - seen_decls: *std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, void), - existing_by_inst: *const std.AutoHashMapUnmanaged(InternPool.TrackedInst.Index, Decl.Index), - /// Decl scanning is run in two passes, so that we can detect when a generated - /// name would clash with an explicit name and use a different one. 
- pass: enum { named, unnamed }, - usingnamespace_index: usize = 0, - comptime_index: usize = 0, - unnamed_test_index: usize = 0, - - fn avoidNameConflict(iter: *ScanDeclIter, comptime fmt: []const u8, args: anytype) !InternPool.NullTerminatedString { - const zcu = iter.zcu; - const gpa = zcu.gpa; - const ip = &zcu.intern_pool; - var name = try ip.getOrPutStringFmt(gpa, fmt, args, .no_embedded_nulls); - var gop = try iter.seen_decls.getOrPut(gpa, name); - var next_suffix: u32 = 0; - while (gop.found_existing) { - name = try ip.getOrPutStringFmt(gpa, "{}_{d}", .{ name.fmt(ip), next_suffix }, .no_embedded_nulls); - gop = try iter.seen_decls.getOrPut(gpa, name); - next_suffix += 1; - } - return name; - } -}; - -fn scanDecl(iter: *ScanDeclIter, decl_inst: Zir.Inst.Index) Allocator.Error!void { - const tracy = trace(@src()); - defer tracy.end(); - - const zcu = iter.zcu; - const namespace_index = iter.namespace_index; - const namespace = zcu.namespacePtr(namespace_index); - const gpa = zcu.gpa; - const zir = namespace.fileScope(zcu).zir; - const ip = &zcu.intern_pool; - - const inst_data = zir.instructions.items(.data)[@intFromEnum(decl_inst)].declaration; - const extra = zir.extraData(Zir.Inst.Declaration, inst_data.payload_index); - const declaration = extra.data; - - // Every Decl needs a name. - const decl_name: InternPool.NullTerminatedString, const kind: Decl.Kind, const is_named_test: bool = switch (declaration.name) { - .@"comptime" => info: { - if (iter.pass != .unnamed) return; - const i = iter.comptime_index; - iter.comptime_index += 1; - break :info .{ - try iter.avoidNameConflict("comptime_{d}", .{i}), - .@"comptime", - false, - }; - }, - .@"usingnamespace" => info: { - // TODO: this isn't right! These should be considered unnamed. Name conflicts can happen here. - // The problem is, we need to preserve the decl ordering for `@typeInfo`. - // I'm not bothering to fix this now, since some upcoming changes will change this code significantly anyway. - if (iter.pass != .named) return; - const i = iter.usingnamespace_index; - iter.usingnamespace_index += 1; - break :info .{ - try iter.avoidNameConflict("usingnamespace_{d}", .{i}), - .@"usingnamespace", - false, - }; - }, - .unnamed_test => info: { - if (iter.pass != .unnamed) return; - const i = iter.unnamed_test_index; - iter.unnamed_test_index += 1; - break :info .{ - try iter.avoidNameConflict("test_{d}", .{i}), - .@"test", - false, - }; - }, - .decltest => info: { - // We consider these to be unnamed since the decl name can be adjusted to avoid conflicts if necessary. - if (iter.pass != .unnamed) return; - assert(declaration.flags.has_doc_comment); - const name = zir.nullTerminatedString(@enumFromInt(zir.extra[extra.end])); - break :info .{ - try iter.avoidNameConflict("decltest.{s}", .{name}), - .@"test", - true, - }; - }, - _ => if (declaration.name.isNamedTest(zir)) info: { - // We consider these to be unnamed since the decl name can be adjusted to avoid conflicts if necessary. 
- if (iter.pass != .unnamed) return; - break :info .{ - try iter.avoidNameConflict("test.{s}", .{zir.nullTerminatedString(declaration.name.toString(zir).?)}), - .@"test", - true, - }; - } else info: { - if (iter.pass != .named) return; - const name = try ip.getOrPutString( - gpa, - zir.nullTerminatedString(declaration.name.toString(zir).?), - .no_embedded_nulls, - ); - try iter.seen_decls.putNoClobber(gpa, name, {}); - break :info .{ - name, - .named, - false, - }; - }, - }; - - switch (kind) { - .@"usingnamespace" => try namespace.usingnamespace_set.ensureUnusedCapacity(gpa, 1), - .@"test" => try zcu.test_functions.ensureUnusedCapacity(gpa, 1), - else => {}, - } - - const parent_file_scope_index = iter.parent_decl.getFileScopeIndex(zcu); - const tracked_inst = try ip.trackZir(gpa, parent_file_scope_index, decl_inst); - - // We create a Decl for it regardless of analysis status. - - const prev_exported, const decl_index = if (iter.existing_by_inst.get(tracked_inst)) |decl_index| decl_index: { - // We need only update this existing Decl. - const decl = zcu.declPtr(decl_index); - const was_exported = decl.is_exported; - assert(decl.kind == kind); // ZIR tracking should preserve this - decl.name = decl_name; - decl.is_pub = declaration.flags.is_pub; - decl.is_exported = declaration.flags.is_export; - break :decl_index .{ was_exported, decl_index }; - } else decl_index: { - // Create and set up a new Decl. - const new_decl_index = try zcu.allocateNewDecl(namespace_index); - const new_decl = zcu.declPtr(new_decl_index); - new_decl.kind = kind; - new_decl.name = decl_name; - new_decl.is_pub = declaration.flags.is_pub; - new_decl.is_exported = declaration.flags.is_export; - new_decl.zir_decl_index = tracked_inst.toOptional(); - break :decl_index .{ false, new_decl_index }; - }; - - const decl = zcu.declPtr(decl_index); - - namespace.decls.putAssumeCapacityNoClobberContext(decl_index, {}, .{ .zcu = zcu }); - - const comp = zcu.comp; - const decl_mod = namespace.fileScope(zcu).mod; - const want_analysis = declaration.flags.is_export or switch (kind) { - .anon => unreachable, - .@"comptime" => true, - .@"usingnamespace" => a: { - namespace.usingnamespace_set.putAssumeCapacityNoClobber(decl_index, declaration.flags.is_pub); - break :a true; - }, - .named => false, - .@"test" => a: { - if (!comp.config.is_test) break :a false; - if (decl_mod != zcu.main_mod) break :a false; - if (is_named_test and comp.test_filters.len > 0) { - const decl_fqn = try namespace.fullyQualifiedName(zcu, decl_name); - const decl_fqn_slice = decl_fqn.toSlice(ip); - for (comp.test_filters) |test_filter| { - if (mem.indexOf(u8, decl_fqn_slice, test_filter)) |_| break; - } else break :a false; - } - zcu.test_functions.putAssumeCapacity(decl_index, {}); // may clobber on incremental update - break :a true; - }, - }; - - if (want_analysis) { - // We will not queue analysis if the decl has been analyzed on a previous update and - // `is_export` is unchanged. In this case, the incremental update mechanism will handle - // re-analysis for us if necessary. 
- if (prev_exported != declaration.flags.is_export or decl.analysis == .unreferenced) { - log.debug("scanDecl queue analyze_decl file='{s}' decl_name='{}' decl_index={d}", .{ - namespace.fileScope(zcu).sub_file_path, decl_name.fmt(ip), decl_index, - }); - comp.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = decl_index }); - } - } - - if (decl.getOwnedFunction(zcu) != null) { - // TODO this logic is insufficient; namespaces we don't re-scan may still require - // updated line numbers. Look into this! - // TODO Look into detecting when this would be unnecessary by storing enough state - // in `Decl` to notice that the line number did not change. - comp.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl_index }); - } -} - /// Cancel the creation of an anon decl and delete any references to it. /// If other decls depend on this decl, they must be aborted first. pub fn abortAnonDecl(mod: *Module, decl_index: Decl.Index) void { |
