diff options
| author | Matthew Lugg <mlugg@mlugg.co.uk> | 2025-05-16 22:42:07 +0100 |
|---|---|---|
| committer | GitHub <noreply@github.com> | 2025-05-16 22:42:07 +0100 |
| commit | 9064907b34128d66ffae8d15e075eddab7af0153 (patch) | |
| tree | 00cd0fb6c0a19ce56074b14947f36ef6f4ff035d /src | |
| parent | 9279ff888bd1b00d4369b4d234e31a161f02a247 (diff) | |
| parent | 46d7e808dcef3c9f9200d6cc1ed4e3a787ba054d (diff) | |
| download | zig-9064907b34128d66ffae8d15e075eddab7af0153.tar.gz zig-9064907b34128d66ffae8d15e075eddab7af0153.zip | |
Merge pull request #23907 from mlugg/ref-trace
compiler: reference trace fixes
Diffstat (limited to 'src')
| -rw-r--r-- | src/Compilation.zig | 95 | ||||
| -rw-r--r-- | src/Sema.zig | 128 | ||||
| -rw-r--r-- | src/Sema/comptime_ptr_access.zig | 2 | ||||
| -rw-r--r-- | src/Zcu.zig | 80 |
4 files changed, 221 insertions(+), 84 deletions(-)
diff --git a/src/Compilation.zig b/src/Compilation.zig index 351b81c122..d3cf9e8f38 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -3328,7 +3328,7 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { if (comp.zcu) |zcu| zcu_errors: { for (zcu.failed_files.keys(), zcu.failed_files.values()) |file, error_msg| { if (error_msg) |msg| { - try addModuleErrorMsg(zcu, &bundle, msg.*); + try addModuleErrorMsg(zcu, &bundle, msg.*, false); } else { // Must be ZIR or Zoir errors. Note that this may include AST errors. _ = try file.getTree(gpa); // Tree must be loaded. @@ -3378,6 +3378,7 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { break :s entries.slice(); }; defer sorted_failed_analysis.deinit(gpa); + var added_any_analysis_error = false; for (sorted_failed_analysis.items(.key), sorted_failed_analysis.items(.value)) |anal_unit, error_msg| { if (comp.incremental) { const refs = try zcu.resolveReferences(); @@ -3389,7 +3390,9 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { zcu.fmtAnalUnit(anal_unit), }); - try addModuleErrorMsg(zcu, &bundle, error_msg.*); + try addModuleErrorMsg(zcu, &bundle, error_msg.*, added_any_analysis_error); + added_any_analysis_error = true; + if (zcu.cimport_errors.get(anal_unit)) |errors| { for (errors.getMessages()) |err_msg_index| { const err_msg = errors.getErrorMessage(err_msg_index); @@ -3412,13 +3415,13 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { } } for (zcu.failed_codegen.values()) |error_msg| { - try addModuleErrorMsg(zcu, &bundle, error_msg.*); + try addModuleErrorMsg(zcu, &bundle, error_msg.*, false); } for (zcu.failed_types.values()) |error_msg| { - try addModuleErrorMsg(zcu, &bundle, error_msg.*); + try addModuleErrorMsg(zcu, &bundle, error_msg.*, false); } for (zcu.failed_exports.values()) |value| { - try addModuleErrorMsg(zcu, &bundle, value.*); + try addModuleErrorMsg(zcu, &bundle, value.*, false); } const actual_error_count = 
zcu.intern_pool.global_error_set.getNamesFromMainThread().len; @@ -3527,7 +3530,7 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { // We don't actually include the error here if `!include_compile_log_sources`. // The sorting above was still necessary, though, to get `log_text` in the right order. if (include_compile_log_sources) { - try addModuleErrorMsg(zcu, &bundle, messages.items[0]); + try addModuleErrorMsg(zcu, &bundle, messages.items[0], false); } break :compile_log_text try log_text.toOwnedSlice(gpa); @@ -3631,10 +3634,14 @@ pub const ErrorNoteHashContext = struct { } }; +const default_reference_trace_len = 2; pub fn addModuleErrorMsg( zcu: *Zcu, eb: *ErrorBundle.Wip, module_err_msg: Zcu.ErrorMsg, + /// If `-freference-trace` is not specified, we only want to show the one reference trace. + /// So, this is whether we have already emitted an error with a reference trace. + already_added_error: bool, ) !void { const gpa = eb.gpa; const ip = &zcu.intern_pool; @@ -3657,45 +3664,44 @@ pub fn addModuleErrorMsg( var ref_traces: std.ArrayListUnmanaged(ErrorBundle.ReferenceTrace) = .empty; defer ref_traces.deinit(gpa); - if (module_err_msg.reference_trace_root.unwrap()) |rt_root| { + rt: { + const rt_root = module_err_msg.reference_trace_root.unwrap() orelse break :rt; + const max_references = zcu.comp.reference_trace orelse refs: { + if (already_added_error) break :rt; + break :refs default_reference_trace_len; + }; + const all_references = try zcu.resolveReferences(); var seen: std.AutoHashMapUnmanaged(InternPool.AnalUnit, void) = .empty; defer seen.deinit(gpa); - const max_references = zcu.comp.reference_trace orelse Sema.default_reference_trace_len; - var referenced_by = rt_root; while (all_references.get(referenced_by)) |maybe_ref| { const ref = maybe_ref orelse break; const gop = try seen.getOrPut(gpa, ref.referencer); if (gop.found_existing) break; - if (ref_traces.items.len < max_references) skip: { - const src = ref.src.upgrade(zcu); - const 
source = try src.file_scope.getSource(gpa); - const span = try src.span(gpa); - const loc = std.zig.findLineColumn(source.bytes, span.main); - const rt_file_path = try src.file_scope.fullPath(gpa); - defer gpa.free(rt_file_path); - const name = switch (ref.referencer.unwrap()) { + if (ref_traces.items.len < max_references) { + var last_call_src = ref.src; + var opt_inline_frame = ref.inline_frame; + while (opt_inline_frame.unwrap()) |inline_frame| { + const f = inline_frame.ptr(zcu).*; + const func_nav = ip.indexToKey(f.callee).func.owner_nav; + const func_name = ip.getNav(func_nav).name.toSlice(ip); + try addReferenceTraceFrame(zcu, eb, &ref_traces, func_name, last_call_src, true); + last_call_src = f.call_src; + opt_inline_frame = f.parent; + } + const root_name: ?[]const u8 = switch (ref.referencer.unwrap()) { .@"comptime" => "comptime", .nav_val, .nav_ty => |nav| ip.getNav(nav).name.toSlice(ip), .type => |ty| Type.fromInterned(ty).containerTypeName(ip).toSlice(ip), .func => |f| ip.getNav(zcu.funcInfo(f).owner_nav).name.toSlice(ip), - .memoized_state => break :skip, + .memoized_state => null, }; - try ref_traces.append(gpa, .{ - .decl_name = try eb.addString(name), - .src_loc = try eb.addSourceLocation(.{ - .src_path = try eb.addString(rt_file_path), - .span_start = span.start, - .span_main = span.main, - .span_end = span.end, - .line = @intCast(loc.line), - .column = @intCast(loc.column), - .source_line = 0, - }), - }); + if (root_name) |n| { + try addReferenceTraceFrame(zcu, eb, &ref_traces, n, last_call_src, false); + } } referenced_by = ref.referencer; } @@ -3775,6 +3781,35 @@ pub fn addModuleErrorMsg( } } +fn addReferenceTraceFrame( + zcu: *Zcu, + eb: *ErrorBundle.Wip, + ref_traces: *std.ArrayListUnmanaged(ErrorBundle.ReferenceTrace), + name: []const u8, + lazy_src: Zcu.LazySrcLoc, + inlined: bool, +) !void { + const gpa = zcu.gpa; + const src = lazy_src.upgrade(zcu); + const source = try src.file_scope.getSource(gpa); + const span = try src.span(gpa); + 
const loc = std.zig.findLineColumn(source.bytes, span.main); + const rt_file_path = try src.file_scope.fullPath(gpa); + defer gpa.free(rt_file_path); + try ref_traces.append(gpa, .{ + .decl_name = try eb.printString("{s}{s}", .{ name, if (inlined) " [inlined]" else "" }), + .src_loc = try eb.addSourceLocation(.{ + .src_path = try eb.addString(rt_file_path), + .span_start = span.start, + .span_main = span.main, + .span_end = span.end, + .line = @intCast(loc.line), + .column = @intCast(loc.column), + .source_line = 0, + }), + }); +} + pub fn addZirErrorMessages(eb: *ErrorBundle.Wip, file: *Zcu.File) !void { const gpa = eb.gpa; const src_path = try file.fullPath(gpa); diff --git a/src/Sema.zig b/src/Sema.zig index 8889b475fd..41453d2bba 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -191,7 +191,6 @@ const LowerZon = @import("Sema/LowerZon.zig"); const arith = @import("Sema/arith.zig"); pub const default_branch_quota = 1000; -pub const default_reference_trace_len = 2; pub const InferredErrorSet = struct { /// The function body from which this error set originates. @@ -445,10 +444,31 @@ pub const Block = struct { pub const Inlining = struct { call_block: *Block, call_src: LazySrcLoc, - has_comptime_args: bool, func: InternPool.Index, + + /// Populated lazily by `refFrame`. + ref_frame: Zcu.InlineReferenceFrame.Index.Optional = .none, + + /// If `true`, the following fields are `undefined`. This doesn't represent a true inline + /// call, but rather a generic call analyzing the instantiation's generic type bodies. 
+ is_generic_instantiation: bool, + + has_comptime_args: bool, comptime_result: Air.Inst.Ref, merges: Merges, + + fn refFrame(inlining: *Inlining, zcu: *Zcu) Allocator.Error!Zcu.InlineReferenceFrame.Index { + if (inlining.ref_frame == .none) { + inlining.ref_frame = (try zcu.addInlineReferenceFrame(.{ + .callee = inlining.func, + .call_src = inlining.call_src, + .parent = if (inlining.call_block.inlining) |parent_inlining| p: { + break :p (try parent_inlining.refFrame(zcu)).toOptional(); + } else .none, + })).toOptional(); + } + return inlining.ref_frame.unwrap().?; + } }; pub const Merges = struct { @@ -2580,7 +2600,7 @@ pub fn failWithOwnedErrorMsg(sema: *Sema, block: ?*Block, err_msg: *Zcu.ErrorMsg if (build_options.enable_debug_extensions and zcu.comp.debug_compile_errors) { var wip_errors: std.zig.ErrorBundle.Wip = undefined; wip_errors.init(gpa) catch @panic("out of memory"); - Compilation.addModuleErrorMsg(zcu, &wip_errors, err_msg.*) catch @panic("out of memory"); + Compilation.addModuleErrorMsg(zcu, &wip_errors, err_msg.*, false) catch @panic("out of memory"); std.debug.print("compile error during Sema:\n", .{}); var error_bundle = wip_errors.toOwnedBundle("") catch @panic("out of memory"); error_bundle.renderToStdErr(.{ .ttyconf = .no_color }); @@ -2590,20 +2610,17 @@ pub fn failWithOwnedErrorMsg(sema: *Sema, block: ?*Block, err_msg: *Zcu.ErrorMsg if (block) |start_block| { var block_it = start_block; while (block_it.inlining) |inlining| { - try sema.errNote( - inlining.call_src, - err_msg, - "called from here", - .{}, - ); + const note_str = note: { + if (inlining.is_generic_instantiation) break :note "generic function instantiated here"; + if (inlining.call_block.isComptime()) break :note "called at comptime here"; + break :note "called inline here"; + }; + try sema.errNote(inlining.call_src, err_msg, "{s}", .{note_str}); block_it = inlining.call_block; } } - const use_ref_trace = if (zcu.comp.reference_trace) |n| n > 0 else zcu.failed_analysis.count() 
== 0; - if (use_ref_trace) { - err_msg.reference_trace_root = sema.owner.toOptional(); - } + err_msg.reference_trace_root = sema.owner.toOptional(); const gop = try zcu.failed_analysis.getOrPut(gpa, sema.owner); if (gop.found_existing) { @@ -4291,7 +4308,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com if (zcu.intern_pool.isFuncBody(val)) { const ty = Type.fromInterned(zcu.intern_pool.typeOf(val)); if (try ty.fnHasRuntimeBitsSema(pt)) { - try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .func = val })); + try sema.addReferenceEntry(block, src, AnalUnit.wrap(.{ .func = val })); try zcu.ensureFuncBodyAnalysisQueued(val); } } @@ -6619,7 +6636,7 @@ pub fn analyzeExport( if (options.linkage == .internal) return; - try sema.ensureNavResolved(src, orig_nav_index, .fully); + try sema.ensureNavResolved(block, src, orig_nav_index, .fully); const exported_nav_index = switch (ip.indexToKey(ip.getNav(orig_nav_index).status.fully_resolved.val)) { .variable => |v| v.owner_nav, @@ -6648,7 +6665,7 @@ pub fn analyzeExport( return sema.fail(block, src, "export target cannot be extern", .{}); } - try sema.maybeQueueFuncBodyAnalysis(src, exported_nav_index); + try sema.maybeQueueFuncBodyAnalysis(block, src, exported_nav_index); try sema.exports.append(gpa, .{ .opts = options, @@ -6896,7 +6913,7 @@ fn zirDeclRef(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air .no_embedded_nulls, ); const nav_index = try sema.lookupIdentifier(block, src, decl_name); - return sema.analyzeNavRef(src, nav_index); + return sema.analyzeNavRef(block, src, nav_index); } fn zirDeclVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -6992,7 +7009,7 @@ fn lookupInNamespace( } for (usingnamespaces.items) |sub_ns_nav| { - try sema.ensureNavResolved(src, sub_ns_nav, .fully); + try sema.ensureNavResolved(block, src, sub_ns_nav, .fully); const sub_ns_ty = Type.fromInterned(ip.getNav(sub_ns_nav).status.fully_resolved.val); const 
sub_ns = zcu.namespacePtr(sub_ns_ty.getNamespaceIndex(zcu)); try checked_namespaces.put(gpa, sub_ns, {}); @@ -7724,10 +7741,11 @@ fn analyzeCall( var generic_inlining: Block.Inlining = if (func_ty_info.is_generic) .{ .call_block = block, .call_src = call_src, - .has_comptime_args = false, // unused by error reporting - .func = .none, // unused by error reporting - .comptime_result = .none, // unused by error reporting - .merges = undefined, // unused because we'll never `return` + .func = func_val.?.toIntern(), + .is_generic_instantiation = true, // this allows the following fields to be `undefined` + .has_comptime_args = undefined, + .comptime_result = undefined, + .merges = undefined, } else undefined; // This is the block in which we evaluate generic function components: that is, generic parameter @@ -8003,7 +8021,7 @@ fn analyzeCall( ref_func: { const runtime_func_val = try sema.resolveValue(runtime_func) orelse break :ref_func; if (!ip.isFuncBody(runtime_func_val.toIntern())) break :ref_func; - try sema.addReferenceEntry(call_src, .wrap(.{ .func = runtime_func_val.toIntern() })); + try sema.addReferenceEntry(block, call_src, .wrap(.{ .func = runtime_func_val.toIntern() })); try zcu.ensureFuncBodyAnalysisQueued(runtime_func_val.toIntern()); } @@ -8205,10 +8223,11 @@ fn analyzeCall( var inlining: Block.Inlining = .{ .call_block = block, .call_src = call_src, + .func = func_val.?.toIntern(), + .is_generic_instantiation = false, .has_comptime_args = for (args) |a| { if (try sema.isComptimeKnown(a)) break true; } else false, - .func = func_val.?.toIntern(), .comptime_result = undefined, .merges = .{ .block_inst = block_inst, @@ -8239,7 +8258,10 @@ fn analyzeCall( if (!inlining.has_comptime_args) { var block_it = block; while (block_it.inlining) |parent_inlining| { - if (!parent_inlining.has_comptime_args and parent_inlining.func == func_val.?.toIntern()) { + if (!parent_inlining.is_generic_instantiation and + !parent_inlining.has_comptime_args and + 
parent_inlining.func == func_val.?.toIntern()) + { return sema.fail(block, call_src, "inline call is recursive", .{}); } block_it = parent_inlining.call_block; @@ -17258,7 +17280,7 @@ fn zirClosureGet(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat .@"comptime" => |index| return Air.internedToRef(index), .runtime => |index| index, .nav_val => |nav| return sema.analyzeNavVal(block, src, nav), - .nav_ref => |nav| return sema.analyzeNavRef(src, nav), + .nav_ref => |nav| return sema.analyzeNavRef(block, src, nav), }; // The comptime case is handled already above. Runtime case below. @@ -18411,7 +18433,7 @@ fn typeInfoNamespaceDecls( if (zcu.analysis_in_progress.contains(.wrap(.{ .nav_val = nav }))) { continue; } - try sema.ensureNavResolved(src, nav, .fully); + try sema.ensureNavResolved(block, src, nav, .fully); const namespace_ty = Type.fromInterned(ip.getNav(nav).status.fully_resolved.val); try sema.typeInfoNamespaceDecls(block, src, namespace_ty.getNamespaceIndex(zcu).toOptional(), declaration_ty, decl_vals, seen_namespaces); } @@ -19443,6 +19465,7 @@ fn analyzeRet( }; if (block.inlining) |inlining| { + assert(!inlining.is_generic_instantiation); // can't `return` in a generic param/ret ty expr if (block.isComptime()) { const ret_val = try sema.resolveConstValue(block, operand_src, operand, null); inlining.comptime_result = operand; @@ -27936,7 +27959,7 @@ fn namespaceLookupRef( decl_name: InternPool.NullTerminatedString, ) CompileError!?Air.Inst.Ref { const nav = try sema.namespaceLookup(block, src, namespace, decl_name) orelse return null; - return try sema.analyzeNavRef(src, nav); + return try sema.analyzeNavRef(block, src, nav); } fn namespaceLookupVal( @@ -29099,7 +29122,7 @@ fn coerceExtra( .@"extern" => |e| e.owner_nav, else => unreachable, }; - const inst_as_ptr = try sema.analyzeNavRef(inst_src, fn_nav); + const inst_as_ptr = try sema.analyzeNavRef(block, inst_src, fn_nav); return sema.coerce(block, dest_ty, inst_as_ptr, inst_src); } @@ 
-30752,7 +30775,7 @@ fn coerceVarArgParam( .@"fn" => fn_ptr: { const fn_val = try sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, inst, undefined); const fn_nav = zcu.funcInfo(fn_val.toIntern()).owner_nav; - break :fn_ptr try sema.analyzeNavRef(inst_src, fn_nav); + break :fn_ptr try sema.analyzeNavRef(block, inst_src, fn_nav); }, .array => return sema.fail(block, inst_src, "arrays must be passed by reference to variadic function", .{}), .float => float: { @@ -31762,12 +31785,13 @@ fn analyzeNavVal( src: LazySrcLoc, nav_index: InternPool.Nav.Index, ) CompileError!Air.Inst.Ref { - const ref = try sema.analyzeNavRefInner(src, nav_index, false); + const ref = try sema.analyzeNavRefInner(block, src, nav_index, false); return sema.analyzeLoad(block, src, ref, src); } fn addReferenceEntry( sema: *Sema, + opt_block: ?*Block, src: LazySrcLoc, referenced_unit: AnalUnit, ) !void { @@ -31775,10 +31799,12 @@ fn addReferenceEntry( if (!zcu.comp.incremental and zcu.comp.reference_trace == 0) return; const gop = try sema.references.getOrPut(sema.gpa, referenced_unit); if (gop.found_existing) return; - // TODO: we need to figure out how to model inline calls here. - // They aren't references in the analysis sense, but ought to show up in the reference trace! - // Would representing inline calls in the reference table cause excessive memory usage? 
- try zcu.addUnitReference(sema.owner, referenced_unit, src); + try zcu.addUnitReference(sema.owner, referenced_unit, src, inline_frame: { + const block = opt_block orelse break :inline_frame .none; + const inlining = block.inlining orelse break :inline_frame .none; + const frame = try inlining.refFrame(zcu); + break :inline_frame frame.toOptional(); + }); } pub fn addTypeReferenceEntry( @@ -31797,7 +31823,7 @@ fn ensureMemoizedStateResolved(sema: *Sema, src: LazySrcLoc, stage: InternPool.M const pt = sema.pt; const unit: AnalUnit = .wrap(.{ .memoized_state = stage }); - try sema.addReferenceEntry(src, unit); + try sema.addReferenceEntry(null, src, unit); try sema.declareDependency(.{ .memoized_state = stage }); if (pt.zcu.analysis_in_progress.contains(unit)) { @@ -31806,7 +31832,7 @@ fn ensureMemoizedStateResolved(sema: *Sema, src: LazySrcLoc, stage: InternPool.M try pt.ensureMemoizedStateUpToDate(stage); } -pub fn ensureNavResolved(sema: *Sema, src: LazySrcLoc, nav_index: InternPool.Nav.Index, kind: enum { type, fully }) CompileError!void { +pub fn ensureNavResolved(sema: *Sema, block: *Block, src: LazySrcLoc, nav_index: InternPool.Nav.Index, kind: enum { type, fully }) CompileError!void { const pt = sema.pt; const zcu = pt.zcu; const ip = &zcu.intern_pool; @@ -31829,7 +31855,7 @@ pub fn ensureNavResolved(sema: *Sema, src: LazySrcLoc, nav_index: InternPool.Nav .type => .{ .nav_ty = nav_index }, .fully => .{ .nav_val = nav_index }, }); - try sema.addReferenceEntry(src, anal_unit); + try sema.addReferenceEntry(block, src, anal_unit); if (zcu.analysis_in_progress.contains(anal_unit)) { return sema.failWithOwnedErrorMsg(null, try sema.errMsg(.{ @@ -31859,25 +31885,25 @@ fn optRefValue(sema: *Sema, opt_val: ?Value) !Value { } })); } -fn analyzeNavRef(sema: *Sema, src: LazySrcLoc, nav_index: InternPool.Nav.Index) CompileError!Air.Inst.Ref { - return sema.analyzeNavRefInner(src, nav_index, true); +fn analyzeNavRef(sema: *Sema, block: *Block, src: LazySrcLoc, nav_index: 
InternPool.Nav.Index) CompileError!Air.Inst.Ref { + return sema.analyzeNavRefInner(block, src, nav_index, true); } /// Analyze a reference to the `Nav` at the given index. Ensures the underlying `Nav` is analyzed. /// If this pointer will be used directly, `is_ref` must be `true`. /// If this pointer will be immediately loaded (i.e. a `decl_val` instruction), `is_ref` must be `false`. -fn analyzeNavRefInner(sema: *Sema, src: LazySrcLoc, orig_nav_index: InternPool.Nav.Index, is_ref: bool) CompileError!Air.Inst.Ref { +fn analyzeNavRefInner(sema: *Sema, block: *Block, src: LazySrcLoc, orig_nav_index: InternPool.Nav.Index, is_ref: bool) CompileError!Air.Inst.Ref { const pt = sema.pt; const zcu = pt.zcu; const ip = &zcu.intern_pool; - try sema.ensureNavResolved(src, orig_nav_index, if (is_ref) .type else .fully); + try sema.ensureNavResolved(block, src, orig_nav_index, if (is_ref) .type else .fully); const nav_index = nav: { if (ip.getNav(orig_nav_index).isExternOrFn(ip)) { // Getting a pointer to this `Nav` might mean we actually get a pointer to something else! // We need to resolve the value to know for sure. 
- if (is_ref) try sema.ensureNavResolved(src, orig_nav_index, .fully); + if (is_ref) try sema.ensureNavResolved(block, src, orig_nav_index, .fully); switch (ip.indexToKey(ip.getNav(orig_nav_index).status.fully_resolved.val)) { .func => |f| break :nav f.owner_nav, .@"extern" => |e| break :nav e.owner_nav, @@ -31901,7 +31927,7 @@ fn analyzeNavRefInner(sema: *Sema, src: LazySrcLoc, orig_nav_index: InternPool.N }, }); if (is_ref) { - try sema.maybeQueueFuncBodyAnalysis(src, nav_index); + try sema.maybeQueueFuncBodyAnalysis(block, src, nav_index); } return Air.internedToRef((try pt.intern(.{ .ptr = .{ .ty = ptr_ty.toIntern(), @@ -31910,7 +31936,7 @@ fn analyzeNavRefInner(sema: *Sema, src: LazySrcLoc, orig_nav_index: InternPool.N } }))); } -fn maybeQueueFuncBodyAnalysis(sema: *Sema, src: LazySrcLoc, nav_index: InternPool.Nav.Index) !void { +fn maybeQueueFuncBodyAnalysis(sema: *Sema, block: *Block, src: LazySrcLoc, nav_index: InternPool.Nav.Index) !void { const pt = sema.pt; const zcu = pt.zcu; const ip = &zcu.intern_pool; @@ -31918,16 +31944,16 @@ fn maybeQueueFuncBodyAnalysis(sema: *Sema, src: LazySrcLoc, nav_index: InternPoo // To avoid forcing too much resolution, let's first resolve the type, and check if it's a function. // If it is, we can resolve the *value*, and queue analysis as needed. 
- try sema.ensureNavResolved(src, nav_index, .type); + try sema.ensureNavResolved(block, src, nav_index, .type); const nav_ty: Type = .fromInterned(ip.getNav(nav_index).typeOf(ip)); if (nav_ty.zigTypeTag(zcu) != .@"fn") return; if (!try nav_ty.fnHasRuntimeBitsSema(pt)) return; - try sema.ensureNavResolved(src, nav_index, .fully); + try sema.ensureNavResolved(block, src, nav_index, .fully); const nav_val = zcu.navValue(nav_index); if (!ip.isFuncBody(nav_val.toIntern())) return; - try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .func = nav_val.toIntern() })); + try sema.addReferenceEntry(block, src, AnalUnit.wrap(.{ .func = nav_val.toIntern() })); try zcu.ensureFuncBodyAnalysisQueued(nav_val.toIntern()); } @@ -31943,8 +31969,8 @@ fn analyzeRef( if (try sema.resolveValue(operand)) |val| { switch (zcu.intern_pool.indexToKey(val.toIntern())) { - .@"extern" => |e| return sema.analyzeNavRef(src, e.owner_nav), - .func => |f| return sema.analyzeNavRef(src, f.owner_nav), + .@"extern" => |e| return sema.analyzeNavRef(block, src, e.owner_nav), + .func => |f| return sema.analyzeNavRef(block, src, f.owner_nav), else => return uavRef(sema, val.toIntern()), } } @@ -35508,7 +35534,7 @@ fn resolveInferredErrorSet( } // In this case we are dealing with the actual InferredErrorSet object that // corresponds to the function, not one created to track an inline/comptime call. 
- try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .func = func_index })); + try sema.addReferenceEntry(block, src, AnalUnit.wrap(.{ .func = func_index })); try pt.ensureFuncBodyUpToDate(func_index); } diff --git a/src/Sema/comptime_ptr_access.zig b/src/Sema/comptime_ptr_access.zig index 2e21c31f2b..f4ac897154 100644 --- a/src/Sema/comptime_ptr_access.zig +++ b/src/Sema/comptime_ptr_access.zig @@ -228,7 +228,7 @@ fn loadComptimePtrInner( const base_val: MutableValue = switch (ptr.base_addr) { .nav => |nav| val: { - try sema.ensureNavResolved(src, nav, .fully); + try sema.ensureNavResolved(block, src, nav, .fully); const val = ip.getNav(nav).status.fully_resolved.val; switch (ip.indexToKey(val)) { .variable => return .runtime_load, diff --git a/src/Zcu.zig b/src/Zcu.zig index fab40763e8..584eb8ca3d 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -215,6 +215,9 @@ all_references: std.ArrayListUnmanaged(Reference) = .empty, /// Freelist of indices in `all_references`. free_references: std.ArrayListUnmanaged(u32) = .empty, +inline_reference_frames: std.ArrayListUnmanaged(InlineReferenceFrame) = .empty, +free_inline_reference_frames: std.ArrayListUnmanaged(InlineReferenceFrame.Index) = .empty, + /// Key is the `AnalUnit` *performing* the reference. This representation allows /// incremental updates to quickly delete references caused by a specific `AnalUnit`. /// Value is index into `all_type_reference` of the first reference triggered by the unit. @@ -583,6 +586,42 @@ pub const Reference = struct { next: u32, /// The source location of the reference. src: LazySrcLoc, + /// If not `.none`, this is the index of the `InlineReferenceFrame` which should appear + /// between the referencer and `referenced` in the reference trace. These frames represent + /// inline calls, which do not create actual references (since they happen in the caller's + /// `AnalUnit`), but do show in the reference trace. 
+ inline_frame: InlineReferenceFrame.Index.Optional, +}; + +pub const InlineReferenceFrame = struct { + /// The inline *callee*; that is, the function which was called inline. + /// The *caller* is either `parent`, or else the unit causing the original `Reference`. + callee: InternPool.Index, + /// The source location of the inline call, in the *caller*. + call_src: LazySrcLoc, + /// If not `.none`, a frame which should appear directly below this one. + /// This will be the "parent" inline call; this frame's `callee` is our caller. + parent: InlineReferenceFrame.Index.Optional, + + pub const Index = enum(u32) { + _, + pub fn ptr(idx: Index, zcu: *Zcu) *InlineReferenceFrame { + return &zcu.inline_reference_frames.items[@intFromEnum(idx)]; + } + pub fn toOptional(idx: Index) Optional { + return @enumFromInt(@intFromEnum(idx)); + } + pub const Optional = enum(u32) { + none = std.math.maxInt(u32), + _, + pub fn unwrap(opt: Optional) ?Index { + return switch (opt) { + .none => null, + _ => @enumFromInt(@intFromEnum(opt)), + }; + } + }; + }; }; pub const TypeReference = struct { @@ -3440,12 +3479,28 @@ pub fn deleteUnitReferences(zcu: *Zcu, anal_unit: AnalUnit) void { var idx = kv.value; while (idx != std.math.maxInt(u32)) { + const ref = zcu.all_references.items[idx]; zcu.free_references.append(gpa, idx) catch { // This space will be reused eventually, so we need not propagate this error. // Just leak it for now, and let GC reclaim it later on. break :unit_refs; }; - idx = zcu.all_references.items[idx].next; + idx = ref.next; + + var opt_inline_frame = ref.inline_frame; + while (opt_inline_frame.unwrap()) |inline_frame| { + // The same inline frame could be used multiple times by one unit. We need to + // detect this case to avoid adding it to `free_inline_reference_frames` more + // than once. We do that by setting `parent` to itself as a marker. 
+ if (inline_frame.ptr(zcu).parent == inline_frame.toOptional()) break; + zcu.free_inline_reference_frames.append(gpa, inline_frame) catch { + // This space will be reused eventually, so we need not propagate this error. + // Just leak it for now, and let GC reclaim it later on. + break :unit_refs; + }; + opt_inline_frame = inline_frame.ptr(zcu).parent; + inline_frame.ptr(zcu).parent = inline_frame.toOptional(); // signal to code above + } } } @@ -3480,7 +3535,22 @@ pub fn deleteUnitCompileLogs(zcu: *Zcu, anal_unit: AnalUnit) void { } } -pub fn addUnitReference(zcu: *Zcu, src_unit: AnalUnit, referenced_unit: AnalUnit, ref_src: LazySrcLoc) Allocator.Error!void { +pub fn addInlineReferenceFrame(zcu: *Zcu, frame: InlineReferenceFrame) Allocator.Error!Zcu.InlineReferenceFrame.Index { + const frame_idx: InlineReferenceFrame.Index = zcu.free_inline_reference_frames.pop() orelse idx: { + _ = try zcu.inline_reference_frames.addOne(zcu.gpa); + break :idx @enumFromInt(zcu.inline_reference_frames.items.len - 1); + }; + frame_idx.ptr(zcu).* = frame; + return frame_idx; +} + +pub fn addUnitReference( + zcu: *Zcu, + src_unit: AnalUnit, + referenced_unit: AnalUnit, + ref_src: LazySrcLoc, + inline_frame: InlineReferenceFrame.Index.Optional, +) Allocator.Error!void { const gpa = zcu.gpa; zcu.clearCachedResolvedReferences(); @@ -3500,6 +3570,7 @@ pub fn addUnitReference(zcu: *Zcu, src_unit: AnalUnit, referenced_unit: AnalUnit .referenced = referenced_unit, .next = if (gop.found_existing) gop.value_ptr.* else std.math.maxInt(u32), .src = ref_src, + .inline_frame = inline_frame, }; gop.value_ptr.* = @intCast(ref_idx); @@ -3828,7 +3899,10 @@ pub fn unionTagFieldIndex(zcu: *const Zcu, loaded_union: InternPool.LoadedUnionT pub const ResolvedReference = struct { referencer: AnalUnit, + /// If `inline_frame` is not `.none`, this is the *deepest* source location in the chain of + /// inline calls. For source locations further up the inline call stack, consult `inline_frame`. 
src: LazySrcLoc, + inline_frame: InlineReferenceFrame.Index.Optional, }; /// Returns a mapping from an `AnalUnit` to where it is referenced. @@ -4037,6 +4111,7 @@ fn resolveReferencesInner(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolv try unit_queue.put(gpa, ref.referenced, .{ .referencer = unit, .src = ref.src, + .inline_frame = ref.inline_frame, }); } ref_idx = ref.next; @@ -4055,6 +4130,7 @@ fn resolveReferencesInner(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolv try type_queue.put(gpa, ref.referenced, .{ .referencer = unit, .src = ref.src, + .inline_frame = .none, }); } ref_idx = ref.next; |
