From db33ee45b7261c9ec62a1087cfc9377bc4e7aa8f Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 3 Jul 2023 22:09:30 -0700 Subject: rework generic function calls Abridged summary: * Move `Module.Fn` into `InternPool`. * Delete a lot of confusing and problematic `Sema` logic related to generic function calls. This commit removes `Module.Fn` and replaces it with two new `InternPool.Tag` values: * `func_decl` - corresponding to a function declared in the source code. This one contains line/column numbers, zir_body_inst, etc. * `func_instance` - one for each monomorphization of a generic function. Contains a reference to the `func_decl` from which the instantiation came, along with the `comptime` parameter values (or types in the case of `anytype`). Since `InternPool` provides deduplication on these values, these fields are now deleted from `Module`: * `monomorphed_func_keys` * `monomorphed_funcs` * `align_stack_fns` Instead of these, Sema logic for generic function instantiation now unconditionally evaluates the function prototype expression for every generic callsite. This is technically required in order for type coercions to work. The previous code had some dubious, probably wrong hacks to make things work, such as `hashUncoerced`. I'm not 100% sure how we were able to eliminate that function and still pass all the behavior tests, but I'm pretty sure things were still broken without doing type coercion for every generic function call argument. After the function prototype is evaluated, it produces a deduplicated `func_instance` `InternPool.Index` which can then be used for the generic function call. Some other nice things enabled by this simplification are the removal of `comptime_args_fn_inst` and `preallocated_new_func` from `Sema`, and the messy logic associated with them. I have not yet been able to measure the perf of this against master branch. 
On one hand, it reduces memory usage and pointer chasing of the most heavily used `InternPool` Tag - function bodies - but on the other hand, it does evaluate function prototype expressions more than before. We will soon find out. --- src/Module.zig | 686 +++++++++++++++++++++++---------------------------------- 1 file changed, 272 insertions(+), 414 deletions(-) (limited to 'src/Module.zig') diff --git a/src/Module.zig b/src/Module.zig index 96be13e768..a91a24987d 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -101,16 +101,6 @@ tmp_hack_arena: std.heap.ArenaAllocator, /// This is currently only used for string literals. memoized_decls: std.AutoHashMapUnmanaged(InternPool.Index, Decl.Index) = .{}, -monomorphed_func_keys: std.ArrayListUnmanaged(InternPool.Index) = .{}, -/// The set of all the generic function instantiations. This is used so that when a generic -/// function is called twice with the same comptime parameter arguments, both calls dispatch -/// to the same function. -monomorphed_funcs: MonomorphedFuncsSet = .{}, -/// Contains the values from `@setAlignStack`. A sparse table is used here -/// instead of a field of `Fn` because usage of `@setAlignStack` is rare, while -/// functions are many. -align_stack_fns: std.AutoHashMapUnmanaged(Fn.Index, SetAlignStack) = .{}, - /// We optimize memory usage for a compilation with no compile errors by storing the /// error messages and mapping outside of `Decl`. /// The ErrorMsg memory is owned by the decl, using Module's general purpose allocator. @@ -189,7 +179,8 @@ reference_table: std.AutoHashMapUnmanaged(Decl.Index, struct { }) = .{}, panic_messages: [PanicId.len]Decl.OptionalIndex = .{.none} ** PanicId.len, -panic_func_index: Fn.OptionalIndex = .none, +/// The panic function body. 
+panic_func_index: InternPool.Index = .none, null_stack_trace: InternPool.Index = .none, pub const PanicId = enum { @@ -239,50 +230,6 @@ pub const CImportError = struct { } }; -pub const MonomorphedFuncKey = struct { func: Fn.Index, args_index: u32, args_len: u32 }; - -pub const MonomorphedFuncAdaptedKey = struct { func: Fn.Index, args: []const InternPool.Index }; - -pub const MonomorphedFuncsSet = std.HashMapUnmanaged( - MonomorphedFuncKey, - InternPool.Index, - MonomorphedFuncsContext, - std.hash_map.default_max_load_percentage, -); - -pub const MonomorphedFuncsContext = struct { - mod: *Module, - - pub fn eql(_: @This(), a: MonomorphedFuncKey, b: MonomorphedFuncKey) bool { - return std.meta.eql(a, b); - } - - pub fn hash(ctx: @This(), key: MonomorphedFuncKey) u64 { - const key_args = ctx.mod.monomorphed_func_keys.items[key.args_index..][0..key.args_len]; - return std.hash.Wyhash.hash(@intFromEnum(key.func), std.mem.sliceAsBytes(key_args)); - } -}; - -pub const MonomorphedFuncsAdaptedContext = struct { - mod: *Module, - - pub fn eql(ctx: @This(), adapted_key: MonomorphedFuncAdaptedKey, other_key: MonomorphedFuncKey) bool { - const other_key_args = ctx.mod.monomorphed_func_keys.items[other_key.args_index..][0..other_key.args_len]; - return adapted_key.func == other_key.func and std.mem.eql(InternPool.Index, adapted_key.args, other_key_args); - } - - pub fn hash(_: @This(), adapted_key: MonomorphedFuncAdaptedKey) u64 { - return std.hash.Wyhash.hash(@intFromEnum(adapted_key.func), std.mem.sliceAsBytes(adapted_key.args)); - } -}; - -pub const SetAlignStack = struct { - alignment: Alignment, - /// TODO: This needs to store a non-lazy source location for the case of an inline function - /// which does `@setAlignStack` (applying it to the caller). - src: LazySrcLoc, -}; - /// A `Module` has zero or one of these depending on whether `-femit-h` is enabled. pub const GlobalEmitH = struct { /// Where to put the output. 
@@ -625,13 +572,6 @@ pub const Decl = struct { function_body, }; - pub fn clearValues(decl: *Decl, mod: *Module) void { - if (decl.getOwnedFunctionIndex(mod).unwrap()) |func| { - _ = mod.align_stack_fns.remove(func); - mod.destroyFunc(func); - } - } - /// This name is relative to the containing namespace of the decl. /// The memory is owned by the containing File ZIR. pub fn getName(decl: Decl, mod: *Module) ?[:0]const u8 { @@ -816,14 +756,17 @@ pub const Decl = struct { return mod.typeToUnion(decl.val.toType()); } - /// If the Decl owns its value and it is a function, return it, - /// otherwise null. - pub fn getOwnedFunction(decl: Decl, mod: *Module) ?*Fn { - return mod.funcPtrUnwrap(decl.getOwnedFunctionIndex(mod)); + pub fn getOwnedFunction(decl: Decl, mod: *Module) ?InternPool.Key.Func { + const i = decl.getOwnedFunctionIndex(); + if (i == .none) return null; + return switch (mod.intern_pool.indexToKey(i)) { + .func => |func| func, + else => null, + }; } - pub fn getOwnedFunctionIndex(decl: Decl, mod: *Module) Fn.OptionalIndex { - return if (decl.owns_tv) decl.val.getFunctionIndex(mod) else .none; + pub fn getOwnedFunctionIndex(decl: Decl) InternPool.Index { + return if (decl.owns_tv) decl.val.toIntern() else .none; } /// If the Decl owns its value and it is an extern function, returns it, @@ -1385,71 +1328,39 @@ pub const ExternFn = struct { } }; -/// Some Fn struct memory is owned by the Decl's TypedValue.Managed arena allocator. -/// Extern functions do not have this data structure; they are represented by `ExternFn` -/// instead. -pub const Fn = struct { - /// The Decl that corresponds to the function itself. - owner_decl: Decl.Index, - /// The ZIR instruction that is a function instruction. Use this to find - /// the body. We store this rather than the body directly so that when ZIR - /// is regenerated on update(), we can map this to the new corresponding - /// ZIR instruction. 
- zir_body_inst: Zir.Inst.Index, - /// If this is not null, this function is a generic function instantiation, and - /// there is a `TypedValue` here for each parameter of the function. - /// Non-comptime parameters are marked with a `generic_poison` for the value. - /// Non-anytype parameters are marked with a `generic_poison` for the type. - /// These never have .generic_poison for the Type - /// because the Type is needed to pass to `Type.eql` and for inserting comptime arguments - /// into the inst_map when analyzing the body of a generic function instantiation. - /// Instead, the is_anytype knowledge is communicated via `isAnytypeParam`. - comptime_args: ?[*]TypedValue, - - /// Precomputed hash for monomorphed_funcs. - /// This is important because it may be accessed when resizing monomorphed_funcs - /// while this Fn has already been added to the set, but does not have the - /// owner_decl, comptime_args, or other fields populated yet. - /// This field is undefined if comptime_args == null. - hash: u64, - - /// Relative to owner Decl. - lbrace_line: u32, - /// Relative to owner Decl. - rbrace_line: u32, - lbrace_column: u16, - rbrace_column: u16, - - /// When a generic function is instantiated, this value is inherited from the - /// active Sema context. Importantly, this value is also updated when an existing - /// generic function instantiation is found and called. - branch_quota: u32, - - /// If this is not none, this function is a generic function instantiation, and - /// this is the generic function decl from which the instance was derived. - /// This information is redundant with a combination of checking if comptime_args is - /// not null and looking at the first decl dependency of owner_decl. This redundant - /// information is useful for three reasons: - /// 1. 
Improved perf of monomorphed_funcs when checking the eql() function because it - /// can do two fewer pointer chases by grabbing the info from this field directly - /// instead of accessing the decl and then the dependencies set. - /// 2. While a generic function instantiation is being initialized, we need hash() - /// and eql() to work before the initialization is complete. Completing the - /// insertion into the decl dependency set has more fallible operations than simply - /// setting this field. - /// 3. I forgot what the third thing was while typing up the other two. - generic_owner_decl: Decl.OptionalIndex, - - state: Analysis, - is_cold: bool = false, - is_noinline: bool, - calls_or_awaits_errorable_fn: bool = false, +/// This struct is used to keep track of any dependencies related to functions instances +/// that return inferred error sets. Note that a function may be associated to +/// multiple different error sets, for example an inferred error set which +/// this function returns, but also any inferred error sets of called inline +/// or comptime functions. +pub const InferredErrorSet = struct { + /// The function from which this error set originates. + func: InternPool.Index, + + /// All currently known errors that this error set contains. This includes + /// direct additions via `return error.Foo;`, and possibly also errors that + /// are returned from any dependent functions. When the inferred error set is + /// fully resolved, this map contains all the errors that the function might return. + errors: NameMap = .{}, + + /// Other inferred error sets which this inferred error set should include. + inferred_error_sets: std.AutoArrayHashMapUnmanaged(InferredErrorSet.Index, void) = .{}, + + /// Whether the function returned anyerror. This is true if either of + /// the dependent functions returns anyerror. + is_anyerror: bool = false, + + /// Whether this error set is already fully resolved. 
If true, resolving + /// can skip resolving any dependents of this inferred error set. + is_resolved: bool = false, + + pub const NameMap = std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void); pub const Index = enum(u32) { _, - pub fn toOptional(i: Index) OptionalIndex { - return @as(OptionalIndex, @enumFromInt(@intFromEnum(i))); + pub fn toOptional(i: InferredErrorSet.Index) InferredErrorSet.OptionalIndex { + return @as(InferredErrorSet.OptionalIndex, @enumFromInt(@intFromEnum(i))); } }; @@ -1457,159 +1368,37 @@ pub const Fn = struct { none = std.math.maxInt(u32), _, - pub fn init(oi: ?Index) OptionalIndex { - return @as(OptionalIndex, @enumFromInt(@intFromEnum(oi orelse return .none))); + pub fn init(oi: ?InferredErrorSet.Index) InferredErrorSet.OptionalIndex { + return @as(InferredErrorSet.OptionalIndex, @enumFromInt(@intFromEnum(oi orelse return .none))); } - pub fn unwrap(oi: OptionalIndex) ?Index { + pub fn unwrap(oi: InferredErrorSet.OptionalIndex) ?InferredErrorSet.Index { if (oi == .none) return null; - return @as(Index, @enumFromInt(@intFromEnum(oi))); + return @as(InferredErrorSet.Index, @enumFromInt(@intFromEnum(oi))); } }; - pub const Analysis = enum { - /// This function has not yet undergone analysis, because we have not - /// seen a potential runtime call. It may be analyzed in future. - none, - /// Analysis for this function has been queued, but not yet completed. - queued, - /// This function intentionally only has ZIR generated because it is marked - /// inline, which means no runtime version of the function will be generated. - inline_only, - in_progress, - /// There will be a corresponding ErrorMsg in Module.failed_decls - sema_failure, - /// This Fn might be OK but it depends on another Decl which did not - /// successfully complete semantic analysis. - dependency_failure, - success, - }; - - /// This struct is used to keep track of any dependencies related to functions instances - /// that return inferred error sets. 
Note that a function may be associated to - /// multiple different error sets, for example an inferred error set which - /// this function returns, but also any inferred error sets of called inline - /// or comptime functions. - pub const InferredErrorSet = struct { - /// The function from which this error set originates. - func: Fn.Index, - - /// All currently known errors that this error set contains. This includes - /// direct additions via `return error.Foo;`, and possibly also errors that - /// are returned from any dependent functions. When the inferred error set is - /// fully resolved, this map contains all the errors that the function might return. - errors: NameMap = .{}, - - /// Other inferred error sets which this inferred error set should include. - inferred_error_sets: std.AutoArrayHashMapUnmanaged(InferredErrorSet.Index, void) = .{}, - - /// Whether the function returned anyerror. This is true if either of - /// the dependent functions returns anyerror. - is_anyerror: bool = false, - - /// Whether this error set is already fully resolved. If true, resolving - /// can skip resolving any dependents of this inferred error set. 
- is_resolved: bool = false, - - pub const NameMap = std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void); - - pub const Index = enum(u32) { - _, - - pub fn toOptional(i: InferredErrorSet.Index) InferredErrorSet.OptionalIndex { - return @as(InferredErrorSet.OptionalIndex, @enumFromInt(@intFromEnum(i))); - } - }; - - pub const OptionalIndex = enum(u32) { - none = std.math.maxInt(u32), - _, - - pub fn init(oi: ?InferredErrorSet.Index) InferredErrorSet.OptionalIndex { - return @as(InferredErrorSet.OptionalIndex, @enumFromInt(@intFromEnum(oi orelse return .none))); - } - - pub fn unwrap(oi: InferredErrorSet.OptionalIndex) ?InferredErrorSet.Index { - if (oi == .none) return null; - return @as(InferredErrorSet.Index, @enumFromInt(@intFromEnum(oi))); - } - }; - - pub fn addErrorSet( - self: *InferredErrorSet, - err_set_ty: Type, - ip: *InternPool, - gpa: Allocator, - ) !void { - switch (err_set_ty.toIntern()) { - .anyerror_type => { - self.is_anyerror = true; + pub fn addErrorSet( + self: *InferredErrorSet, + err_set_ty: Type, + ip: *InternPool, + gpa: Allocator, + ) !void { + switch (err_set_ty.toIntern()) { + .anyerror_type => { + self.is_anyerror = true; + }, + else => switch (ip.indexToKey(err_set_ty.toIntern())) { + .error_set_type => |error_set_type| { + for (error_set_type.names) |name| { + try self.errors.put(gpa, name, {}); + } }, - else => switch (ip.indexToKey(err_set_ty.toIntern())) { - .error_set_type => |error_set_type| { - for (error_set_type.names) |name| { - try self.errors.put(gpa, name, {}); - } - }, - .inferred_error_set_type => |ies_index| { - try self.inferred_error_sets.put(gpa, ies_index, {}); - }, - else => unreachable, + .inferred_error_set_type => |ies_index| { + try self.inferred_error_sets.put(gpa, ies_index, {}); }, - } - } - }; - - pub fn isAnytypeParam(func: Fn, mod: *Module, index: u32) bool { - const file = mod.declPtr(func.owner_decl).getFileScope(mod); - - const tags = file.zir.instructions.items(.tag); - - const 
param_body = file.zir.getParamBody(func.zir_body_inst); - const param = param_body[index]; - - return switch (tags[param]) { - .param, .param_comptime => false, - .param_anytype, .param_anytype_comptime => true, - else => unreachable, - }; - } - - pub fn getParamName(func: Fn, mod: *Module, index: u32) [:0]const u8 { - const file = mod.declPtr(func.owner_decl).getFileScope(mod); - - const tags = file.zir.instructions.items(.tag); - const data = file.zir.instructions.items(.data); - - const param_body = file.zir.getParamBody(func.zir_body_inst); - const param = param_body[index]; - - return switch (tags[param]) { - .param, .param_comptime => blk: { - const extra = file.zir.extraData(Zir.Inst.Param, data[param].pl_tok.payload_index); - break :blk file.zir.nullTerminatedString(extra.data.name); - }, - .param_anytype, .param_anytype_comptime => blk: { - const param_data = data[param].str_tok; - break :blk param_data.get(file.zir); - }, - else => unreachable, - }; - } - - pub fn hasInferredErrorSet(func: Fn, mod: *Module) bool { - const owner_decl = mod.declPtr(func.owner_decl); - const zir = owner_decl.getFileScope(mod).zir; - const zir_tags = zir.instructions.items(.tag); - switch (zir_tags[func.zir_body_inst]) { - .func => return false, - .func_inferred => return true, - .func_fancy => { - const inst_data = zir.instructions.items(.data)[func.zir_body_inst].pl_node; - const extra = zir.extraData(Zir.Inst.FuncFancy, inst_data.payload_index); - return extra.data.bits.is_inferred_error; + else => unreachable, }, - else => unreachable, } } }; @@ -2468,6 +2257,22 @@ pub const SrcLoc = struct { } } else unreachable; }, + .call_arg => |call_arg| { + const tree = try src_loc.file_scope.getTree(gpa); + const node = src_loc.declRelativeToNodeIndex(call_arg.call_node_offset); + var buf: [1]Ast.Node.Index = undefined; + const call_full = tree.fullCall(&buf, node).?; + const src_node = call_full.ast.params[call_arg.arg_index]; + return nodeToSpan(tree, src_node); + }, + 
.fn_proto_param => |fn_proto_param| { + const tree = try src_loc.file_scope.getTree(gpa); + const node = src_loc.declRelativeToNodeIndex(fn_proto_param.fn_proto_node_offset); + var buf: [1]Ast.Node.Index = undefined; + const fn_proto_full = tree.fullFnProto(&buf, node).?; + const src_node = fn_proto_full.ast.params[fn_proto_param.param_index]; + return nodeToSpan(tree, src_node); + }, .node_offset_bin_lhs => |node_off| { const tree = try src_loc.file_scope.getTree(gpa); const node = src_loc.declRelativeToNodeIndex(node_off); @@ -3146,6 +2951,20 @@ pub const LazySrcLoc = union(enum) { /// Next, navigate to the corresponding capture. /// The Decl is determined contextually. for_capture_from_input: i32, + /// The source location points to the argument node of a function call. + /// The Decl is determined contextually. + call_arg: struct { + /// Points to the function call AST node. + call_node_offset: i32, + /// The index of the argument the source location points to. + arg_index: u32, + }, + fn_proto_param: struct { + /// Points to the function prototype AST node. + fn_proto_node_offset: i32, + /// The index of the parameter the source location points to. 
+ param_index: u32, + }, pub const nodeOffset = if (TracedOffset.want_tracing) nodeOffsetDebug else nodeOffsetRelease; @@ -3235,6 +3054,8 @@ pub const LazySrcLoc = union(enum) { .node_offset_store_operand, .for_input, .for_capture_from_input, + .call_arg, + .fn_proto_param, => .{ .file_scope = decl.getFileScope(mod), .parent_decl_node = decl.src_node, @@ -3373,8 +3194,6 @@ pub fn deinit(mod: *Module) void { mod.global_error_set.deinit(gpa); mod.test_functions.deinit(gpa); - mod.align_stack_fns.deinit(gpa); - mod.monomorphed_funcs.deinit(gpa); mod.decls_free_list.deinit(gpa); mod.allocated_decls.deinit(gpa); @@ -3407,7 +3226,6 @@ pub fn destroyDecl(mod: *Module, decl_index: Decl.Index) void { } } if (decl.src_scope) |scope| scope.decRef(gpa); - decl.clearValues(mod); decl.dependants.deinit(gpa); decl.dependencies.deinit(gpa); decl.* = undefined; @@ -3439,11 +3257,7 @@ pub fn structPtr(mod: *Module, index: Struct.Index) *Struct { return mod.intern_pool.structPtr(index); } -pub fn funcPtr(mod: *Module, index: Fn.Index) *Fn { - return mod.intern_pool.funcPtr(index); -} - -pub fn inferredErrorSetPtr(mod: *Module, index: Fn.InferredErrorSet.Index) *Fn.InferredErrorSet { +pub fn inferredErrorSetPtr(mod: *Module, index: InferredErrorSet.Index) *InferredErrorSet { return mod.intern_pool.inferredErrorSetPtr(index); } @@ -3457,10 +3271,6 @@ pub fn structPtrUnwrap(mod: *Module, index: Struct.OptionalIndex) ?*Struct { return mod.structPtr(index.unwrap() orelse return null); } -pub fn funcPtrUnwrap(mod: *Module, index: Fn.OptionalIndex) ?*Fn { - return mod.funcPtr(index.unwrap() orelse return null); -} - /// Returns true if and only if the Decl is the top level struct associated with a File. pub fn declIsRoot(mod: *Module, decl_index: Decl.Index) bool { const decl = mod.declPtr(decl_index); @@ -3881,6 +3691,8 @@ fn updateZirRefs(mod: *Module, file: *File, old_zir: Zir) !void { // to re-generate ZIR for the File. 
try file.outdated_decls.append(gpa, root_decl); + const ip = &mod.intern_pool; + while (decl_stack.popOrNull()) |decl_index| { const decl = mod.declPtr(decl_index); // Anonymous decls and the root decl have this set to 0. We still need @@ -3918,7 +3730,7 @@ fn updateZirRefs(mod: *Module, file: *File, old_zir: Zir) !void { } if (decl.getOwnedFunction(mod)) |func| { - func.zir_body_inst = inst_map.get(func.zir_body_inst) orelse { + func.zirBodyInst(ip).* = inst_map.get(func.zir_body_inst) orelse { try file.deleted_decls.append(gpa, decl_index); continue; }; @@ -4101,11 +3913,6 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void { // prior to re-analysis. try mod.deleteDeclExports(decl_index); - // Similarly, `@setAlignStack` invocations will be re-discovered. - if (decl.getOwnedFunctionIndex(mod).unwrap()) |func| { - _ = mod.align_stack_fns.remove(func); - } - // Dependencies will be re-discovered, so we remove them here prior to re-analysis. for (decl.dependencies.keys()) |dep_index| { const dep = mod.declPtr(dep_index); @@ -4189,11 +3996,12 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void { } } -pub fn ensureFuncBodyAnalyzed(mod: *Module, func_index: Fn.Index) SemaError!void { +pub fn ensureFuncBodyAnalyzed(mod: *Module, func_index: InternPool.Index) SemaError!void { const tracy = trace(@src()); defer tracy.end(); - const func = mod.funcPtr(func_index); + const ip = &mod.intern_pool; + const func = mod.funcInfo(func_index); const decl_index = func.owner_decl; const decl = mod.declPtr(decl_index); @@ -4211,7 +4019,7 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func_index: Fn.Index) SemaError!void => return error.AnalysisFail, .complete, .codegen_failure_retryable => { - switch (func.state) { + switch (func.analysis(ip).state) { .sema_failure, .dependency_failure => return error.AnalysisFail, .none, .queued => {}, .in_progress => unreachable, @@ -4227,11 +4035,11 @@ pub fn ensureFuncBodyAnalyzed(mod: 
*Module, func_index: Fn.Index) SemaError!void var air = mod.analyzeFnBody(func_index, sema_arena) catch |err| switch (err) { error.AnalysisFail => { - if (func.state == .in_progress) { + if (func.analysis(ip).state == .in_progress) { // If this decl caused the compile error, the analysis field would // be changed to indicate it was this Decl's fault. Because this // did not happen, we infer here that it was a dependency failure. - func.state = .dependency_failure; + func.analysis(ip).state = .dependency_failure; } return error.AnalysisFail; }, @@ -4251,14 +4059,14 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func_index: Fn.Index) SemaError!void if (no_bin_file and !dump_air and !dump_llvm_ir) return; - var liveness = try Liveness.analyze(gpa, air, &mod.intern_pool); + var liveness = try Liveness.analyze(gpa, air, ip); defer liveness.deinit(gpa); if (dump_air) { const fqn = try decl.getFullyQualifiedName(mod); - std.debug.print("# Begin Function AIR: {}:\n", .{fqn.fmt(&mod.intern_pool)}); + std.debug.print("# Begin Function AIR: {}:\n", .{fqn.fmt(ip)}); @import("print_air.zig").dump(mod, air, liveness); - std.debug.print("# End Function AIR: {}\n\n", .{fqn.fmt(&mod.intern_pool)}); + std.debug.print("# End Function AIR: {}\n\n", .{fqn.fmt(ip)}); } if (std.debug.runtime_safety) { @@ -4266,7 +4074,7 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func_index: Fn.Index) SemaError!void .gpa = gpa, .air = air, .liveness = liveness, - .intern_pool = &mod.intern_pool, + .intern_pool = ip, }; defer verify.deinit(); @@ -4321,8 +4129,9 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func_index: Fn.Index) SemaError!void /// analyzed, and for ensuring it can exist at runtime (see /// `sema.fnHasRuntimeBits`). This function does *not* guarantee that the body /// will be analyzed when it returns: for that, see `ensureFuncBodyAnalyzed`. 
-pub fn ensureFuncBodyAnalysisQueued(mod: *Module, func_index: Fn.Index) !void { - const func = mod.funcPtr(func_index); +pub fn ensureFuncBodyAnalysisQueued(mod: *Module, func_index: InternPool.Index) !void { + const ip = &mod.intern_pool; + const func = mod.funcInfo(func_index); const decl_index = func.owner_decl; const decl = mod.declPtr(decl_index); @@ -4348,7 +4157,7 @@ pub fn ensureFuncBodyAnalysisQueued(mod: *Module, func_index: Fn.Index) !void { assert(decl.has_tv); - switch (func.state) { + switch (func.analysis(ip).state) { .none => {}, .queued => return, // As above, we don't need to forward errors here. @@ -4366,7 +4175,7 @@ pub fn ensureFuncBodyAnalysisQueued(mod: *Module, func_index: Fn.Index) !void { // since the last update try mod.comp.work_queue.writeItem(.{ .emit_h_decl = decl_index }); } - func.state = .queued; + func.analysis(ip).state = .queued; } pub fn updateEmbedFile(mod: *Module, embed_file: *EmbedFile) SemaError!void { @@ -4490,10 +4299,8 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void { .code = file.zir, .owner_decl = new_decl, .owner_decl_index = new_decl_index, - .func = null, .func_index = .none, .fn_ret_ty = Type.void, - .owner_func = null, .owner_func_index = .none, .comptime_mutable_decls = &comptime_mutable_decls, }; @@ -4573,10 +4380,8 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { .code = zir, .owner_decl = decl, .owner_decl_index = decl_index, - .func = null, .func_index = .none, .fn_ret_ty = Type.void, - .owner_func = null, .owner_func_index = .none, .comptime_mutable_decls = &comptime_mutable_decls, }; @@ -4658,48 +4463,49 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { return true; } - if (mod.intern_pool.indexToFunc(decl_tv.val.toIntern()).unwrap()) |func_index| { - const func = mod.funcPtr(func_index); - const owns_tv = func.owner_decl == decl_index; - if (owns_tv) { - var prev_type_has_bits = false; - var prev_is_inline = false; - var type_changed = true; - - if (decl.has_tv) { - 
prev_type_has_bits = decl.ty.isFnOrHasRuntimeBits(mod); - type_changed = !decl.ty.eql(decl_tv.ty, mod); - if (decl.getOwnedFunction(mod)) |prev_func| { - prev_is_inline = prev_func.state == .inline_only; + const ip = &mod.intern_pool; + switch (ip.indexToKey(decl_tv.val.toIntern())) { + .func => |func| { + const owns_tv = func.owner_decl == decl_index; + if (owns_tv) { + var prev_type_has_bits = false; + var prev_is_inline = false; + var type_changed = true; + + if (decl.has_tv) { + prev_type_has_bits = decl.ty.isFnOrHasRuntimeBits(mod); + type_changed = !decl.ty.eql(decl_tv.ty, mod); + if (decl.getOwnedFunction(mod)) |prev_func| { + prev_is_inline = prev_func.analysis(ip).state == .inline_only; + } } - } - decl.clearValues(mod); - - decl.ty = decl_tv.ty; - decl.val = (try decl_tv.val.intern(decl_tv.ty, mod)).toValue(); - // linksection, align, and addrspace were already set by Sema - decl.has_tv = true; - decl.owns_tv = owns_tv; - decl.analysis = .complete; - decl.generation = mod.generation; - - const is_inline = decl.ty.fnCallingConvention(mod) == .Inline; - if (decl.is_exported) { - const export_src: LazySrcLoc = .{ .token_offset = @intFromBool(decl.is_pub) }; - if (is_inline) { - return sema.fail(&block_scope, export_src, "export of inline function", .{}); + + decl.ty = decl_tv.ty; + decl.val = (try decl_tv.val.intern(decl_tv.ty, mod)).toValue(); + // linksection, align, and addrspace were already set by Sema + decl.has_tv = true; + decl.owns_tv = owns_tv; + decl.analysis = .complete; + decl.generation = mod.generation; + + const is_inline = decl.ty.fnCallingConvention(mod) == .Inline; + if (decl.is_exported) { + const export_src: LazySrcLoc = .{ .token_offset = @intFromBool(decl.is_pub) }; + if (is_inline) { + return sema.fail(&block_scope, export_src, "export of inline function", .{}); + } + // The scope needs to have the decl in it. 
+ try sema.analyzeExport(&block_scope, export_src, .{ .name = decl.name }, decl_index); } - // The scope needs to have the decl in it. - try sema.analyzeExport(&block_scope, export_src, .{ .name = decl.name }, decl_index); + return type_changed or is_inline != prev_is_inline; } - return type_changed or is_inline != prev_is_inline; - } + }, + else => {}, } var type_changed = true; if (decl.has_tv) { type_changed = !decl.ty.eql(decl_tv.ty, mod); } - decl.clearValues(mod); decl.owns_tv = false; var queue_linker_work = false; @@ -4707,7 +4513,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { switch (decl_tv.val.toIntern()) { .generic_poison => unreachable, .unreachable_value => unreachable, - else => switch (mod.intern_pool.indexToKey(decl_tv.val.toIntern())) { + else => switch (ip.indexToKey(decl_tv.val.toIntern())) { .variable => |variable| if (variable.decl == decl_index) { decl.owns_tv = true; queue_linker_work = true; @@ -4743,11 +4549,11 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { } else if (bytes.len == 0) { return sema.fail(&block_scope, section_src, "linksection cannot be empty", .{}); } - const section = try mod.intern_pool.getOrPutString(gpa, bytes); + const section = try ip.getOrPutString(gpa, bytes); break :blk section.toOptional(); }; decl.@"addrspace" = blk: { - const addrspace_ctx: Sema.AddressSpaceContext = switch (mod.intern_pool.indexToKey(decl_tv.val.toIntern())) { + const addrspace_ctx: Sema.AddressSpaceContext = switch (ip.indexToKey(decl_tv.val.toIntern())) { .variable => .variable, .extern_func, .func => .function, else => .constant, @@ -5309,7 +5115,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err decl.has_align = has_align; decl.has_linksection_or_addrspace = has_linksection_or_addrspace; decl.zir_decl_index = @as(u32, @intCast(decl_sub_index)); - if (decl.getOwnedFunctionIndex(mod) != .none) { + if (decl.getOwnedFunctionIndex() != .none) { switch (comp.bin_file.tag) { .coff, 
.elf, .macho, .plan9 => { // TODO Look into detecting when this would be unnecessary by storing enough state @@ -5386,7 +5192,6 @@ pub fn clearDecl( try namespace.deleteAllDecls(mod, outdated_decls); } } - decl.clearValues(mod); if (decl.deletion_flag) { decl.deletion_flag = false; @@ -5497,19 +5302,26 @@ fn deleteDeclExports(mod: *Module, decl_index: Decl.Index) Allocator.Error!void export_owners.deinit(mod.gpa); } -pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaError!Air { +pub fn analyzeFnBody(mod: *Module, func_index: InternPool.Index, arena: Allocator) SemaError!Air { const tracy = trace(@src()); defer tracy.end(); const gpa = mod.gpa; - const func = mod.funcPtr(func_index); + const ip = &mod.intern_pool; + const func = mod.funcInfo(func_index); const decl_index = func.owner_decl; const decl = mod.declPtr(decl_index); var comptime_mutable_decls = std.ArrayList(Decl.Index).init(gpa); defer comptime_mutable_decls.deinit(); + // In the case of a generic function instance, this is the type of the + // instance, which has comptime parameters elided. In other words, it is + // the runtime-known parameters only, not to be confused with the + // generic_owner function type, which potentially has more parameters, + // including comptime parameters. 
const fn_ty = decl.ty; + const fn_ty_info = mod.typeToFunc(fn_ty).?; var sema: Sema = .{ .mod = mod, @@ -5518,18 +5330,16 @@ pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaE .code = decl.getFileScope(mod).zir, .owner_decl = decl, .owner_decl_index = decl_index, - .func = func, - .func_index = func_index.toOptional(), - .fn_ret_ty = mod.typeToFunc(fn_ty).?.return_type.toType(), - .owner_func = func, - .owner_func_index = func_index.toOptional(), - .branch_quota = @max(func.branch_quota, Sema.default_branch_quota), + .func_index = func_index, + .fn_ret_ty = fn_ty_info.return_type.toType(), + .owner_func_index = func_index, + .branch_quota = @max(func.branchQuota(ip).*, Sema.default_branch_quota), .comptime_mutable_decls = &comptime_mutable_decls, }; defer sema.deinit(); // reset in case calls to errorable functions are removed. - func.calls_or_awaits_errorable_fn = false; + func.analysis(ip).calls_or_awaits_errorable_fn = false; // First few indexes of extra are reserved and set at the end. const reserved_count = @typeInfo(Air.ExtraIndex).Enum.fields.len; @@ -5551,8 +5361,7 @@ pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaE }; defer inner_block.instructions.deinit(gpa); - const fn_info = sema.code.getFnInfo(func.zir_body_inst); - const zir_tags = sema.code.instructions.items(.tag); + const fn_info = sema.code.getFnInfo(func.zirBodyInst(ip).*); // Here we are performing "runtime semantic analysis" for a function body, which means // we must map the parameter ZIR instructions to `arg` AIR instructions. @@ -5560,35 +5369,36 @@ pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaE // This could be a generic function instantiation, however, in which case we need to // map the comptime parameters to constant values and only emit arg AIR instructions // for the runtime ones. 
- const runtime_params_len = @as(u32, @intCast(mod.typeToFunc(fn_ty).?.param_types.len)); + const runtime_params_len = fn_ty_info.param_types.len; try inner_block.instructions.ensureTotalCapacityPrecise(gpa, runtime_params_len); - try sema.air_instructions.ensureUnusedCapacity(gpa, fn_info.total_params_len * 2); // * 2 for the `addType` + try sema.air_instructions.ensureUnusedCapacity(gpa, fn_info.total_params_len); try sema.inst_map.ensureSpaceForInstructions(gpa, fn_info.param_body); - var runtime_param_index: usize = 0; - var total_param_index: usize = 0; - for (fn_info.param_body) |inst| { - switch (zir_tags[inst]) { - .param, .param_comptime, .param_anytype, .param_anytype_comptime => {}, - else => continue, + // In the case of a generic function instance, pre-populate all the comptime args. + if (func.comptime_args.len != 0) { + for ( + fn_info.param_body[0..func.comptime_args.len], + func.comptime_args.get(ip), + ) |inst, comptime_arg| { + if (comptime_arg == .none) continue; + sema.inst_map.putAssumeCapacityNoClobber(inst, Air.internedToRef(comptime_arg)); } - const param_ty = if (func.comptime_args) |comptime_args| t: { - const arg_tv = comptime_args[total_param_index]; - - const arg_val = if (!arg_tv.val.isGenericPoison()) - arg_tv.val - else if (try arg_tv.ty.onePossibleValue(mod)) |opv| - opv - else - break :t arg_tv.ty; - - const arg = try sema.addConstant(arg_val); - sema.inst_map.putAssumeCapacityNoClobber(inst, arg); - total_param_index += 1; - continue; - } else mod.typeToFunc(fn_ty).?.param_types[runtime_param_index].toType(); + } + + const src_params_len = if (func.comptime_args.len != 0) + func.comptime_args.len + else + runtime_params_len; + + var runtime_param_index: usize = 0; + for (fn_info.param_body[0..src_params_len], 0..) 
|inst, src_param_index| { + const gop = sema.inst_map.getOrPutAssumeCapacity(inst); + if (gop.found_existing) continue; // provided above by comptime arg - const opt_opv = sema.typeHasOnePossibleValue(param_ty) catch |err| switch (err) { + const param_ty = fn_ty_info.param_types.get(ip)[runtime_param_index]; + runtime_param_index += 1; + + const opt_opv = sema.typeHasOnePossibleValue(param_ty.toType()) catch |err| switch (err) { error.NeededSourceLocation => unreachable, error.GenericPoison => unreachable, error.ComptimeReturn => unreachable, @@ -5596,28 +5406,22 @@ pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaE else => |e| return e, }; if (opt_opv) |opv| { - const arg = try sema.addConstant(opv); - sema.inst_map.putAssumeCapacityNoClobber(inst, arg); - total_param_index += 1; - runtime_param_index += 1; + gop.value_ptr.* = Air.internedToRef(opv.toIntern()); continue; } - const air_ty = try sema.addType(param_ty); - const arg_index = @as(u32, @intCast(sema.air_instructions.len)); + const arg_index: u32 = @intCast(sema.air_instructions.len); + gop.value_ptr.* = Air.indexToRef(arg_index); inner_block.instructions.appendAssumeCapacity(arg_index); sema.air_instructions.appendAssumeCapacity(.{ .tag = .arg, .data = .{ .arg = .{ - .ty = air_ty, - .src_index = @as(u32, @intCast(total_param_index)), + .ty = Air.internedToRef(param_ty), + .src_index = @intCast(src_param_index), } }, }); - sema.inst_map.putAssumeCapacityNoClobber(inst, Air.indexToRef(arg_index)); - total_param_index += 1; - runtime_param_index += 1; } - func.state = .in_progress; + func.analysis(ip).state = .in_progress; const last_arg_index = inner_block.instructions.items.len; @@ -5648,7 +5452,7 @@ pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaE } // If we don't get an error return trace from a caller, create our own. 
- if (func.calls_or_awaits_errorable_fn and + if (func.analysis(ip).calls_or_awaits_errorable_fn and mod.comp.bin_file.options.error_return_tracing and !sema.fn_ret_ty.isError(mod)) { @@ -5677,7 +5481,7 @@ pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaE sema.air_extra.appendSliceAssumeCapacity(inner_block.instructions.items); sema.air_extra.items[@intFromEnum(Air.ExtraIndex.main_block)] = main_block_index; - func.state = .success; + func.analysis(ip).state = .success; // Finally we must resolve the return type and parameter types so that backends // have full access to type information. @@ -5716,7 +5520,7 @@ pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaE }; } - return Air{ + return .{ .instructions = sema.air_instructions.toOwnedSlice(), .extra = try sema.air_extra.toOwnedSlice(gpa), }; @@ -5731,9 +5535,6 @@ fn markOutdatedDecl(mod: *Module, decl_index: Decl.Index) !void { if (mod.cimport_errors.fetchSwapRemove(decl_index)) |kv| { for (kv.value) |err| err.deinit(mod.gpa); } - if (decl.getOwnedFunctionIndex(mod).unwrap()) |func| { - _ = mod.align_stack_fns.remove(func); - } if (mod.emit_h) |emit_h| { if (emit_h.failed_decls.fetchSwapRemove(decl_index)) |kv| { kv.value.destroy(mod.gpa); @@ -5777,14 +5578,6 @@ pub fn destroyUnion(mod: *Module, index: Union.Index) void { return mod.intern_pool.destroyUnion(mod.gpa, index); } -pub fn createFunc(mod: *Module, initialization: Fn) Allocator.Error!Fn.Index { - return mod.intern_pool.createFunc(mod.gpa, initialization); -} - -pub fn destroyFunc(mod: *Module, index: Fn.Index) void { - return mod.intern_pool.destroyFunc(mod.gpa, index); -} - pub fn allocateNewDecl( mod: *Module, namespace: Namespace.Index, @@ -6578,7 +6371,6 @@ pub fn populateTestFunctions( // Since we are replacing the Decl's value we must perform cleanup on the // previous value. 
- decl.clearValues(mod); decl.ty = new_ty; decl.val = new_val; decl.has_tv = true; @@ -6657,7 +6449,7 @@ pub fn markReferencedDeclsAlive(mod: *Module, val: Value) Allocator.Error!void { switch (mod.intern_pool.indexToKey(val.toIntern())) { .variable => |variable| try mod.markDeclIndexAlive(variable.decl), .extern_func => |extern_func| try mod.markDeclIndexAlive(extern_func.decl), - .func => |func| try mod.markDeclIndexAlive(mod.funcPtr(func.index).owner_decl), + .func => |func| try mod.markDeclIndexAlive(func.owner_decl), .error_union => |error_union| switch (error_union.val) { .err_name => {}, .payload => |payload| try mod.markReferencedDeclsAlive(payload.toValue()), @@ -6851,8 +6643,8 @@ pub fn adjustPtrTypeChild(mod: *Module, ptr_ty: Type, new_child: Type) Allocator return mod.ptrType(info); } -pub fn funcType(mod: *Module, info: InternPool.Key.FuncType) Allocator.Error!Type { - return (try intern(mod, .{ .func_type = info })).toType(); +pub fn funcType(mod: *Module, key: InternPool.GetFuncTypeKey) Allocator.Error!Type { + return (try mod.intern_pool.getFuncType(mod.gpa, key)).toType(); } /// Use this for `anyframe->T` only. 
@@ -7231,16 +7023,28 @@ pub fn typeToFunc(mod: *Module, ty: Type) ?InternPool.Key.FuncType { return mod.intern_pool.indexToFuncType(ty.toIntern()); } -pub fn typeToInferredErrorSet(mod: *Module, ty: Type) ?*Fn.InferredErrorSet { +pub fn typeToInferredErrorSet(mod: *Module, ty: Type) ?*InferredErrorSet { const index = typeToInferredErrorSetIndex(mod, ty).unwrap() orelse return null; return mod.inferredErrorSetPtr(index); } -pub fn typeToInferredErrorSetIndex(mod: *Module, ty: Type) Fn.InferredErrorSet.OptionalIndex { +pub fn typeToInferredErrorSetIndex(mod: *Module, ty: Type) InferredErrorSet.OptionalIndex { if (ty.ip_index == .none) return .none; return mod.intern_pool.indexToInferredErrorSetType(ty.toIntern()); } +pub fn funcOwnerDeclPtr(mod: *Module, func_index: InternPool.Index) *Decl { + return mod.declPtr(mod.funcOwnerDeclIndex(func_index)); +} + +pub fn funcOwnerDeclIndex(mod: *Module, func_index: InternPool.Index) Decl.Index { + return mod.funcInfo(func_index).owner_decl; +} + +pub fn funcInfo(mod: *Module, func_index: InternPool.Index) InternPool.Key.Func { + return mod.intern_pool.indexToKey(func_index).func; +} + pub fn fieldSrcLoc(mod: *Module, owner_decl_index: Decl.Index, query: FieldSrcQuery) SrcLoc { @setCold(true); const owner_decl = mod.declPtr(owner_decl_index); @@ -7265,3 +7069,57 @@ pub fn fieldSrcLoc(mod: *Module, owner_decl_index: Decl.Index, query: FieldSrcQu pub fn toEnum(mod: *Module, comptime E: type, val: Value) E { return mod.intern_pool.toEnum(E, val.toIntern()); } + +pub fn isAnytypeParam(mod: *Module, func: InternPool.Index, index: u32) bool { + const file = mod.declPtr(func.owner_decl).getFileScope(mod); + + const tags = file.zir.instructions.items(.tag); + + const param_body = file.zir.getParamBody(func.zir_body_inst); + const param = param_body[index]; + + return switch (tags[param]) { + .param, .param_comptime => false, + .param_anytype, .param_anytype_comptime => true, + else => unreachable, + }; +} + +pub fn getParamName(mod: 
*Module, func_index: InternPool.Index, index: u32) [:0]const u8 { + const func = mod.funcInfo(func_index); + const file = mod.declPtr(func.owner_decl).getFileScope(mod); + + const tags = file.zir.instructions.items(.tag); + const data = file.zir.instructions.items(.data); + + const param_body = file.zir.getParamBody(func.zir_body_inst); + const param = param_body[index]; + + return switch (tags[param]) { + .param, .param_comptime => blk: { + const extra = file.zir.extraData(Zir.Inst.Param, data[param].pl_tok.payload_index); + break :blk file.zir.nullTerminatedString(extra.data.name); + }, + .param_anytype, .param_anytype_comptime => blk: { + const param_data = data[param].str_tok; + break :blk param_data.get(file.zir); + }, + else => unreachable, + }; +} + +pub fn hasInferredErrorSet(mod: *Module, func: InternPool.Key.Func) bool { + const owner_decl = mod.declPtr(func.owner_decl); + const zir = owner_decl.getFileScope(mod).zir; + const zir_tags = zir.instructions.items(.tag); + switch (zir_tags[func.zir_body_inst]) { + .func => return false, + .func_inferred => return true, + .func_fancy => { + const inst_data = zir.instructions.items(.data)[func.zir_body_inst].pl_node; + const extra = zir.extraData(Zir.Inst.FuncFancy, inst_data.payload_index); + return extra.data.bits.is_inferred_error; + }, + else => unreachable, + } +} -- cgit v1.2.3 From 55e89255e18163bcc153138a4883ec8d85e0d517 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 6 Jul 2023 15:27:47 -0700 Subject: compiler: begin untangling anonymous decls from source decls The idea here is to move towards a future where anonymous decls are represented entirely by an `InternPool.Index`. This was needed to start implementing `InternPool.getFuncDecl` which requires moving creation and deletion of Decl objects into InternPool. * remove `Namespace.anon_decls` * remove the concept of cleaning up resources from anonymous decls, relying on InternPool instead. 
* move namespace and decl object allocation into InternPool --- src/Compilation.zig | 8 +-- src/InternPool.zig | 111 ++++++++++++++++++++++++++++++++++++-- src/Module.zig | 149 +++++++++++++++------------------------------------- src/Sema.zig | 33 +++++++----- 4 files changed, 170 insertions(+), 131 deletions(-) (limited to 'src/Module.zig') diff --git a/src/Compilation.zig b/src/Compilation.zig index bc47d9c636..eb4b67933d 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -2055,15 +2055,9 @@ pub fn update(comp: *Compilation, main_progress_node: *std.Progress.Node) !void const decl = module.declPtr(decl_index); assert(decl.deletion_flag); assert(decl.dependants.count() == 0); - const is_anon = if (decl.zir_decl_index == 0) blk: { - break :blk module.namespacePtr(decl.src_namespace).anon_decls.swapRemove(decl_index); - } else false; + assert(decl.zir_decl_index != 0); try module.clearDecl(decl_index, null); - - if (is_anon) { - module.destroyDecl(decl_index); - } } try module.processExports(); diff --git a/src/InternPool.zig b/src/InternPool.zig index 17f40c6d2a..10ea950419 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -20,6 +20,25 @@ limbs: std.ArrayListUnmanaged(u64) = .{}, /// `string_bytes` array is agnostic to either usage. string_bytes: std.ArrayListUnmanaged(u8) = .{}, +/// Rather than allocating Decl objects with an Allocator, we instead allocate +/// them with this SegmentedList. This provides four advantages: +/// * Stable memory so that one thread can access a Decl object while another +/// thread allocates additional Decl objects from this list. +/// * It allows us to use u32 indexes to reference Decl objects rather than +/// pointers, saving memory in Type, Value, and dependency sets. +/// * Using integers to reference Decl objects rather than pointers makes +/// serialization trivial. +/// * It provides a unique integer to be used for anonymous symbol names, avoiding +/// multi-threaded contention on an atomic counter. 
+allocated_decls: std.SegmentedList(Module.Decl, 0) = .{}, +/// When a Decl object is freed from `allocated_decls`, it is pushed into this stack. +decls_free_list: std.ArrayListUnmanaged(Module.Decl.Index) = .{}, + +/// Same pattern as with `allocated_decls`. +allocated_namespaces: std.SegmentedList(Module.Namespace, 0) = .{}, +/// Same pattern as with `decls_free_list`. +namespaces_free_list: std.ArrayListUnmanaged(Module.Namespace.Index) = .{}, + /// Struct objects are stored in this data structure because: /// * They contain pointers such as the field maps. /// * They need to be mutated after creation. @@ -2694,6 +2713,12 @@ pub fn deinit(ip: *InternPool, gpa: Allocator) void { ip.inferred_error_sets_free_list.deinit(gpa); ip.allocated_inferred_error_sets.deinit(gpa); + ip.decls_free_list.deinit(gpa); + ip.allocated_decls.deinit(gpa); + + ip.namespaces_free_list.deinit(gpa); + ip.allocated_namespaces.deinit(gpa); + for (ip.maps.items) |*map| map.deinit(gpa); ip.maps.deinit(gpa); @@ -4274,6 +4299,7 @@ pub fn getExternFunc(ip: *InternPool, gpa: Allocator, key: GetExternFuncKey) All } pub const GetFuncDeclKey = struct { + fn_owner_decl: Module.Decl.Index, param_types: []const Index, noalias_bits: u32, comptime_bits: u32, @@ -4303,9 +4329,36 @@ pub const GetFuncDeclKey = struct { }; pub fn getFuncDecl(ip: *InternPool, gpa: Allocator, key: GetFuncDeclKey) Allocator.Error!Index { - _ = ip; - _ = gpa; - _ = key; + const fn_owner_decl = ip.declPtr(key.fn_owner_decl); + const decl_index = try ip.createDecl(gpa, .{ + .name = undefined, + .src_namespace = fn_owner_decl.src_namespace, + .src_node = fn_owner_decl.src_node, + .src_line = fn_owner_decl.src_line, + .has_tv = true, + .owns_tv = true, + .ty = @panic("TODO"), + .val = @panic("TODO"), + .alignment = .none, + .@"linksection" = fn_owner_decl.@"linksection", + .@"addrspace" = fn_owner_decl.@"addrspace", + .analysis = .complete, + .deletion_flag = false, + .zir_decl_index = fn_owner_decl.zir_decl_index, + .src_scope = 
fn_owner_decl.src_scope, + .generation = 0, + .is_pub = fn_owner_decl.is_pub, + .is_exported = fn_owner_decl.is_exported, + .has_linksection_or_addrspace = fn_owner_decl.has_linksection_or_addrspace, + .has_align = fn_owner_decl.has_align, + .alive = true, + .kind = .anon, + }); + // TODO better names for generic function instantiations + const decl_name = try ip.getOrPutStringFmt(gpa, "{}__anon_{d}", .{ + fn_owner_decl.name.fmt(ip), @intFromEnum(decl_index), + }); + ip.declPtr(decl_index).name = decl_name; @panic("TODO"); } @@ -5553,6 +5606,14 @@ pub fn inferredErrorSetPtrConst(ip: *const InternPool, index: Module.InferredErr return ip.allocated_inferred_error_sets.at(@intFromEnum(index)); } +pub fn declPtr(ip: *InternPool, index: Module.Decl.Index) *Module.Decl { + return ip.allocated_decls.at(@intFromEnum(index)); +} + +pub fn namespacePtr(ip: *InternPool, index: Module.Namespace.Index) *Module.Namespace { + return ip.allocated_namespaces.at(@intFromEnum(index)); +} + pub fn createStruct( ip: *InternPool, gpa: Allocator, @@ -5619,6 +5680,50 @@ pub fn destroyInferredErrorSet(ip: *InternPool, gpa: Allocator, index: Module.In }; } +pub fn createDecl( + ip: *InternPool, + gpa: Allocator, + initialization: Module.Decl, +) Allocator.Error!Module.Decl.Index { + if (ip.decls_free_list.popOrNull()) |index| { + ip.allocated_decls.at(@intFromEnum(index)).* = initialization; + return index; + } + const ptr = try ip.allocated_decls.addOne(gpa); + ptr.* = initialization; + return @as(Module.Decl.Index, @enumFromInt(ip.allocated_decls.len - 1)); +} + +pub fn destroyDecl(ip: *InternPool, gpa: Allocator, index: Module.Decl.Index) void { + ip.declPtr(index).* = undefined; + ip.decls_free_list.append(gpa, index) catch { + // In order to keep `destroyDecl` a non-fallible function, we ignore memory + // allocation failures here, instead leaking the Decl until garbage collection. 
+ }; +} + +pub fn createNamespace( + ip: *InternPool, + gpa: Allocator, + initialization: Module.Namespace, +) Allocator.Error!Module.Namespace.Index { + if (ip.namespaces_free_list.popOrNull()) |index| { + ip.allocated_namespaces.at(@intFromEnum(index)).* = initialization; + return index; + } + const ptr = try ip.allocated_namespaces.addOne(gpa); + ptr.* = initialization; + return @as(Module.Namespace.Index, @enumFromInt(ip.allocated_namespaces.len - 1)); +} + +pub fn destroyNamespace(ip: *InternPool, gpa: Allocator, index: Module.Namespace.Index) void { + ip.namespacePtr(index).* = undefined; + ip.namespaces_free_list.append(gpa, index) catch { + // In order to keep `destroyNamespace` a non-fallible function, we ignore memory + // allocation failures here, instead leaking the Namespace until garbage collection. + }; +} + pub fn getOrPutString( ip: *InternPool, gpa: Allocator, diff --git a/src/Module.zig b/src/Module.zig index a91a24987d..3dd924d17a 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -87,7 +87,9 @@ import_table: std.StringArrayHashMapUnmanaged(*File) = .{}, /// Keys are fully resolved file paths. This table owns the keys and values. embed_table: std.StringHashMapUnmanaged(*EmbedFile) = .{}, -/// Stores all Type and Value objects; periodically garbage collected. +/// Stores all Type and Value objects. +/// The idea is that this will be periodically garbage-collected, but such logic +/// is not yet implemented. intern_pool: InternPool = .{}, /// To be eliminated in a future commit by moving more data into InternPool. @@ -152,25 +154,6 @@ emit_h: ?*GlobalEmitH, test_functions: std.AutoArrayHashMapUnmanaged(Decl.Index, void) = .{}, -/// Rather than allocating Decl objects with an Allocator, we instead allocate -/// them with this SegmentedList. This provides four advantages: -/// * Stable memory so that one thread can access a Decl object while another -/// thread allocates additional Decl objects from this list. 
-/// * It allows us to use u32 indexes to reference Decl objects rather than -/// pointers, saving memory in Type, Value, and dependency sets. -/// * Using integers to reference Decl objects rather than pointers makes -/// serialization trivial. -/// * It provides a unique integer to be used for anonymous symbol names, avoiding -/// multi-threaded contention on an atomic counter. -allocated_decls: std.SegmentedList(Decl, 0) = .{}, -/// When a Decl object is freed from `allocated_decls`, it is pushed into this stack. -decls_free_list: ArrayListUnmanaged(Decl.Index) = .{}, - -/// Same pattern as with `allocated_decls`. -allocated_namespaces: std.SegmentedList(Namespace, 0) = .{}, -/// Same pattern as with `decls_free_list`. -namespaces_free_list: ArrayListUnmanaged(Namespace.Index) = .{}, - global_assembly: std.AutoHashMapUnmanaged(Decl.Index, []u8) = .{}, reference_table: std.AutoHashMapUnmanaged(Decl.Index, struct { @@ -313,6 +296,9 @@ pub const CaptureScope = struct { } pub fn incRef(self: *CaptureScope) void { + // TODO: wtf is reference counting doing in my beautiful codebase? 😠 + // seriously though, let's change this to rely on InternPool garbage + // collection instead. self.refs += 1; } @@ -1427,12 +1413,10 @@ pub const Namespace = struct { /// Direct children of the namespace. Used during an update to detect /// which decls have been added/removed from source. /// Declaration order is preserved via entry order. - /// Key memory is owned by `decl.name`. - /// Anonymous decls are not stored here; they are kept in `anon_decls` instead. + /// These are only declarations named directly by the AST; anonymous + /// declarations are not stored here. decls: std.ArrayHashMapUnmanaged(Decl.Index, void, DeclContext, true) = .{}, - anon_decls: std.AutoArrayHashMapUnmanaged(Decl.Index, void) = .{}, - /// Key is usingnamespace Decl itself. To find the namespace being included, /// the Decl Value has to be resolved as a Type which has a Namespace. 
/// Value is whether the usingnamespace decl is marked `pub`. @@ -1487,18 +1471,11 @@ pub const Namespace = struct { var decls = ns.decls; ns.decls = .{}; - var anon_decls = ns.anon_decls; - ns.anon_decls = .{}; - for (decls.keys()) |decl_index| { mod.destroyDecl(decl_index); } decls.deinit(gpa); - for (anon_decls.keys()) |key| { - mod.destroyDecl(key); - } - anon_decls.deinit(gpa); ns.usingnamespace_set.deinit(gpa); } @@ -1512,9 +1489,6 @@ pub const Namespace = struct { var decls = ns.decls; ns.decls = .{}; - var anon_decls = ns.anon_decls; - ns.anon_decls = .{}; - // TODO rework this code to not panic on OOM. // (might want to coordinate with the clearDecl function) @@ -1524,12 +1498,6 @@ pub const Namespace = struct { } decls.deinit(gpa); - for (anon_decls.keys()) |child_decl| { - mod.clearDecl(child_decl, outdated_decls) catch @panic("out of memory"); - mod.destroyDecl(child_decl); - } - anon_decls.deinit(gpa); - ns.usingnamespace_set.deinit(gpa); } @@ -3195,14 +3163,9 @@ pub fn deinit(mod: *Module) void { mod.test_functions.deinit(gpa); - mod.decls_free_list.deinit(gpa); - mod.allocated_decls.deinit(gpa); mod.global_assembly.deinit(gpa); mod.reference_table.deinit(gpa); - mod.namespaces_free_list.deinit(gpa); - mod.allocated_namespaces.deinit(gpa); - mod.memoized_decls.deinit(gpa); mod.intern_pool.deinit(gpa); mod.tmp_hack_arena.deinit(); @@ -3210,6 +3173,8 @@ pub fn deinit(mod: *Module) void { pub fn destroyDecl(mod: *Module, decl_index: Decl.Index) void { const gpa = mod.gpa; + const ip = &mod.intern_pool; + { const decl = mod.declPtr(decl_index); _ = mod.test_functions.swapRemove(decl_index); @@ -3228,12 +3193,10 @@ pub fn destroyDecl(mod: *Module, decl_index: Decl.Index) void { if (decl.src_scope) |scope| scope.decRef(gpa); decl.dependants.deinit(gpa); decl.dependencies.deinit(gpa); - decl.* = undefined; } - mod.decls_free_list.append(gpa, decl_index) catch { - // In order to keep `destroyDecl` a non-fallible function, we ignore memory - // allocation 
failures here, instead leaking the Decl until garbage collection. - }; + + ip.destroyDecl(gpa, decl_index); + if (mod.emit_h) |mod_emit_h| { const decl_emit_h = mod_emit_h.declPtr(decl_index); decl_emit_h.fwd_decl.deinit(gpa); @@ -3242,11 +3205,11 @@ pub fn destroyDecl(mod: *Module, decl_index: Decl.Index) void { } pub fn declPtr(mod: *Module, index: Decl.Index) *Decl { - return mod.allocated_decls.at(@intFromEnum(index)); + return mod.intern_pool.declPtr(index); } pub fn namespacePtr(mod: *Module, index: Namespace.Index) *Namespace { - return mod.allocated_namespaces.at(@intFromEnum(index)); + return mod.intern_pool.namespacePtr(index); } pub fn unionPtr(mod: *Module, index: Union.Index) *Union { @@ -3740,9 +3703,6 @@ fn updateZirRefs(mod: *Module, file: *File, old_zir: Zir) !void { for (namespace.decls.keys()) |sub_decl| { try decl_stack.append(gpa, sub_decl); } - for (namespace.anon_decls.keys()) |sub_decl| { - try decl_stack.append(gpa, sub_decl); - } } } } @@ -5202,21 +5162,19 @@ pub fn clearDecl( } /// This function is exclusively called for anonymous decls. +/// All resources referenced by anonymous decls are owned by InternPool +/// so there is no cleanup to do here. 
pub fn deleteUnusedDecl(mod: *Module, decl_index: Decl.Index) void { - const decl = mod.declPtr(decl_index); - - assert(!mod.declIsRoot(decl_index)); - assert(mod.namespacePtr(decl.src_namespace).anon_decls.swapRemove(decl_index)); + const gpa = mod.gpa; + const ip = &mod.intern_pool; - const dependants = decl.dependants.keys(); - for (dependants) |dep| { - mod.declPtr(dep).removeDependency(decl_index); - } + ip.destroyDecl(gpa, decl_index); - for (decl.dependencies.keys()) |dep| { - mod.declPtr(dep).removeDependant(decl_index); + if (mod.emit_h) |mod_emit_h| { + const decl_emit_h = mod_emit_h.declPtr(decl_index); + decl_emit_h.fwd_decl.deinit(gpa); + decl_emit_h.* = undefined; } - mod.destroyDecl(decl_index); } /// We don't perform a deletion here, because this Decl or another one @@ -5233,7 +5191,6 @@ pub fn abortAnonDecl(mod: *Module, decl_index: Decl.Index) void { const decl = mod.declPtr(decl_index); assert(!mod.declIsRoot(decl_index)); - assert(mod.namespacePtr(decl.src_namespace).anon_decls.swapRemove(decl_index)); // An aborted decl must not have dependants -- they must have // been aborted first and removed from this list. 
@@ -5545,21 +5502,11 @@ fn markOutdatedDecl(mod: *Module, decl_index: Decl.Index) !void { } pub fn createNamespace(mod: *Module, initialization: Namespace) !Namespace.Index { - if (mod.namespaces_free_list.popOrNull()) |index| { - mod.allocated_namespaces.at(@intFromEnum(index)).* = initialization; - return index; - } - const ptr = try mod.allocated_namespaces.addOne(mod.gpa); - ptr.* = initialization; - return @as(Namespace.Index, @enumFromInt(mod.allocated_namespaces.len - 1)); + return mod.intern_pool.createNamespace(mod.gpa, initialization); } pub fn destroyNamespace(mod: *Module, index: Namespace.Index) void { - mod.namespacePtr(index).* = undefined; - mod.namespaces_free_list.append(mod.gpa, index) catch { - // In order to keep `destroyNamespace` a non-fallible function, we ignore memory - // allocation failures here, instead leaking the Namespace until garbage collection. - }; + return mod.intern_pool.destroyNamespace(mod.gpa, index); } pub fn createStruct(mod: *Module, initialization: Struct) Allocator.Error!Struct.Index { @@ -5584,29 +5531,9 @@ pub fn allocateNewDecl( src_node: Ast.Node.Index, src_scope: ?*CaptureScope, ) !Decl.Index { - const decl_and_index: struct { - new_decl: *Decl, - decl_index: Decl.Index, - } = if (mod.decls_free_list.popOrNull()) |decl_index| d: { - break :d .{ - .new_decl = mod.declPtr(decl_index), - .decl_index = decl_index, - }; - } else d: { - const decl = try mod.allocated_decls.addOne(mod.gpa); - errdefer mod.allocated_decls.shrinkRetainingCapacity(mod.allocated_decls.len - 1); - if (mod.emit_h) |mod_emit_h| { - const decl_emit_h = try mod_emit_h.allocated_emit_h.addOne(mod.gpa); - decl_emit_h.* = .{}; - } - break :d .{ - .new_decl = decl, - .decl_index = @as(Decl.Index, @enumFromInt(mod.allocated_decls.len - 1)), - }; - }; - - if (src_scope) |scope| scope.incRef(); - decl_and_index.new_decl.* = .{ + const ip = &mod.intern_pool; + const gpa = mod.gpa; + const decl_index = try ip.createDecl(gpa, .{ .name = undefined, 
.src_namespace = namespace, .src_node = src_node, @@ -5629,9 +5556,18 @@ pub fn allocateNewDecl( .has_align = false, .alive = false, .kind = .anon, - }; + }); + + if (mod.emit_h) |mod_emit_h| { + if (@intFromEnum(decl_index) >= mod_emit_h.allocated_emit_h.len) { + try mod_emit_h.allocated_emit_h.append(gpa, .{}); + assert(@intFromEnum(decl_index) == mod_emit_h.allocated_emit_h.len); + } + } + + if (src_scope) |scope| scope.incRef(); - return decl_and_index.decl_index; + return decl_index; } pub fn getErrorValue( @@ -5667,7 +5603,7 @@ pub fn createAnonymousDeclFromDecl( const name = try mod.intern_pool.getOrPutStringFmt(mod.gpa, "{}__anon_{d}", .{ src_decl.name.fmt(&mod.intern_pool), @intFromEnum(new_decl_index), }); - try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, namespace, tv, name); + try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, tv, name); return new_decl_index; } @@ -5675,7 +5611,6 @@ pub fn initNewAnonDecl( mod: *Module, new_decl_index: Decl.Index, src_line: u32, - namespace: Namespace.Index, typed_value: TypedValue, name: InternPool.NullTerminatedString, ) Allocator.Error!void { @@ -5692,8 +5627,6 @@ pub fn initNewAnonDecl( new_decl.has_tv = true; new_decl.analysis = .complete; new_decl.generation = mod.generation; - - try mod.namespacePtr(namespace).anon_decls.putNoClobber(mod.gpa, new_decl_index, {}); } pub fn errNoteNonLazy( diff --git a/src/Sema.zig b/src/Sema.zig index b7ea53676a..25cacc01ea 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -2891,12 +2891,12 @@ fn createAnonymousDeclTypeNamed( const name = mod.intern_pool.getOrPutStringFmt(gpa, "{}__{s}_{d}", .{ src_decl.name.fmt(&mod.intern_pool), anon_prefix, @intFromEnum(new_decl_index), }) catch unreachable; - try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, namespace, typed_value, name); + try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, typed_value, name); return new_decl_index; }, .parent => { const name = mod.declPtr(block.src_decl).name; - try 
mod.initNewAnonDecl(new_decl_index, src_decl.src_line, namespace, typed_value, name); + try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, typed_value, name); return new_decl_index; }, .func => { @@ -2932,7 +2932,7 @@ fn createAnonymousDeclTypeNamed( try writer.writeByte(')'); const name = try mod.intern_pool.getOrPutString(gpa, buf.items); - try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, namespace, typed_value, name); + try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, typed_value, name); return new_decl_index; }, .dbg_var => { @@ -2948,7 +2948,7 @@ fn createAnonymousDeclTypeNamed( src_decl.name.fmt(&mod.intern_pool), zir_data[i].str_op.getStr(sema.code), }); - try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, namespace, typed_value, name); + try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, typed_value, name); return new_decl_index; }, else => {}, @@ -7393,11 +7393,12 @@ fn instantiateGenericCall( const ip = &mod.intern_pool; const func_val = try sema.resolveConstValue(block, func_src, func, "generic function being called must be comptime-known"); - const module_fn = switch (mod.intern_pool.indexToKey(func_val.toIntern())) { - .func => |x| x, - .ptr => |ptr| mod.intern_pool.indexToKey(mod.declPtr(ptr.addr.decl).val.toIntern()).func, + const generic_owner = switch (mod.intern_pool.indexToKey(func_val.toIntern())) { + .func => func_val.toIntern(), + .ptr => |ptr| mod.declPtr(ptr.addr.decl).val.toIntern(), else => unreachable, }; + const generic_owner_func = mod.intern_pool.indexToKey(generic_owner).func; // Even though there may already be a generic instantiation corresponding // to this callsite, we must evaluate the expressions of the generic @@ -7407,11 +7408,11 @@ fn instantiateGenericCall( // The actual monomorphization happens via adding `func_instance` to // `InternPool`. 
- const fn_owner_decl = mod.declPtr(module_fn.owner_decl); + const fn_owner_decl = mod.declPtr(generic_owner_func.owner_decl); const namespace_index = fn_owner_decl.src_namespace; const namespace = mod.namespacePtr(namespace_index); const fn_zir = namespace.file_scope.zir; - const fn_info = fn_zir.getFnInfo(module_fn.zir_body_inst); + const fn_info = fn_zir.getFnInfo(generic_owner_func.zir_body_inst); const comptime_args = try sema.arena.alloc(InternPool.Index, uncasted_args.len); @memset(comptime_args, .none); @@ -7434,7 +7435,7 @@ fn instantiateGenericCall( .fn_ret_ty = Type.void, .owner_func_index = .none, .comptime_args = comptime_args, - .generic_owner = module_fn.generic_owner, + .generic_owner = generic_owner, .branch_quota = sema.branch_quota, .branch_count = sema.branch_count, .comptime_mutable_decls = sema.comptime_mutable_decls, @@ -7444,7 +7445,7 @@ fn instantiateGenericCall( var child_block: Block = .{ .parent = null, .sema = &child_sema, - .src_decl = module_fn.owner_decl, + .src_decl = generic_owner_func.owner_decl, .namespace = namespace_index, .wip_capture_scope = block.wip_capture_scope, .instructions = .{}, @@ -8737,7 +8738,13 @@ fn funcCommon( if (inferred_error_set) try sema.validateErrorUnionPayloadType(block, bare_return_type, ret_ty_src); + const fn_owner_decl = if (sema.generic_owner != .none) + mod.funcOwnerDeclIndex(sema.generic_owner) + else + sema.owner_decl_index; + break :i try ip.getFuncDecl(gpa, .{ + .fn_owner_decl = fn_owner_decl, .param_types = param_types, .noalias_bits = noalias_bits, .comptime_bits = comptime_bits, @@ -34628,7 +34635,7 @@ fn generateUnionTagTypeNumbered( errdefer mod.destroyDecl(new_decl_index); const fqn = try union_obj.getFullyQualifiedName(mod); const name = try mod.intern_pool.getOrPutStringFmt(gpa, "@typeInfo({}).Union.tag_type.?", .{fqn.fmt(&mod.intern_pool)}); - try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, block.namespace, .{ + try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, .{ 
.ty = Type.noreturn, .val = Value.@"unreachable", }, name); @@ -34679,7 +34686,7 @@ fn generateUnionTagTypeSimple( errdefer mod.destroyDecl(new_decl_index); const fqn = try union_obj.getFullyQualifiedName(mod); const name = try mod.intern_pool.getOrPutStringFmt(gpa, "@typeInfo({}).Union.tag_type.?", .{fqn.fmt(&mod.intern_pool)}); - try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, block.namespace, .{ + try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, .{ .ty = Type.noreturn, .val = Value.@"unreachable", }, name); -- cgit v1.2.3 From f3dc53f6b53e8493b341f82cb06a56e33e80e6b7 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sat, 8 Jul 2023 23:39:37 -0700 Subject: compiler: rework inferred error sets * move inferred error sets into InternPool. - they are now represented by pointing directly at the corresponding function body value. * inferred error set working memory is now in Sema and expires after the Sema for the function corresponding to the inferred error set is finished having its body analyzed. * error sets use a InternPool.Index.Slice rather than an actual slice to avoid lifetime issues. 
--- lib/std/array_hash_map.zig | 10 +- src/InternPool.zig | 714 ++++++++++++++++++++++++++++++------------ src/Module.zig | 144 ++------- src/Sema.zig | 759 ++++++++++++++++++++++++++------------------- src/codegen/llvm.zig | 25 +- src/link/Dwarf.zig | 57 ++-- src/type.zig | 67 ++-- 7 files changed, 1037 insertions(+), 739 deletions(-) (limited to 'src/Module.zig') diff --git a/lib/std/array_hash_map.zig b/lib/std/array_hash_map.zig index 62165f0304..1e95352a02 100644 --- a/lib/std/array_hash_map.zig +++ b/lib/std/array_hash_map.zig @@ -1669,8 +1669,9 @@ pub fn ArrayHashMapUnmanaged( inline fn checkedHash(ctx: anytype, key: anytype) u32 { comptime std.hash_map.verifyContext(@TypeOf(ctx), @TypeOf(key), K, u32, true); - // If you get a compile error on the next line, it means that - const hash = ctx.hash(key); // your generic hash function doesn't accept your key + // If you get a compile error on the next line, it means that your + // generic hash function doesn't accept your key. + const hash = ctx.hash(key); if (@TypeOf(hash) != u32) { @compileError("Context " ++ @typeName(@TypeOf(ctx)) ++ " has a generic hash function that returns the wrong type!\n" ++ @typeName(u32) ++ " was expected, but found " ++ @typeName(@TypeOf(hash))); @@ -1679,8 +1680,9 @@ pub fn ArrayHashMapUnmanaged( } inline fn checkedEql(ctx: anytype, a: anytype, b: K, b_index: usize) bool { comptime std.hash_map.verifyContext(@TypeOf(ctx), @TypeOf(a), K, u32, true); - // If you get a compile error on the next line, it means that - const eql = ctx.eql(a, b, b_index); // your generic eql function doesn't accept (self, adapt key, K, index) + // If you get a compile error on the next line, it means that your + // generic eql function doesn't accept (self, adapt key, K, index). 
+ const eql = ctx.eql(a, b, b_index); if (@TypeOf(eql) != bool) { @compileError("Context " ++ @typeName(@TypeOf(ctx)) ++ " has a generic eql function that returns the wrong type!\n" ++ @typeName(bool) ++ " was expected, but found " ++ @typeName(@TypeOf(eql))); diff --git a/src/InternPool.zig b/src/InternPool.zig index 10ea950419..e675848646 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -53,14 +53,6 @@ allocated_unions: std.SegmentedList(Module.Union, 0) = .{}, /// When a Union object is freed from `allocated_unions`, it is pushed into this stack. unions_free_list: std.ArrayListUnmanaged(Module.Union.Index) = .{}, -/// InferredErrorSet objects are stored in this data structure because: -/// * They contain pointers such as the errors map and the set of other inferred error sets. -/// * They need to be mutated after creation. -allocated_inferred_error_sets: std.SegmentedList(Module.InferredErrorSet, 0) = .{}, -/// When a Struct object is freed from `allocated_inferred_error_sets`, it is -/// pushed into this stack. -inferred_error_sets_free_list: std.ArrayListUnmanaged(Module.InferredErrorSet.Index) = .{}, - /// Some types such as enums, structs, and unions need to store mappings from field names /// to field index, or value to field index. In such cases, they will store the underlying /// field names and values directly, relying on one of these maps, stored separately, @@ -143,12 +135,24 @@ pub const NullTerminatedString = enum(u32) { empty = 0, _, + /// An array of `NullTerminatedString` existing within the `extra` array. + /// This type exists to provide a struct with lifetime that is + /// not invalidated when items are added to the `InternPool`. 
+ pub const Slice = struct { + start: u32, + len: u32, + + pub fn get(slice: Slice, ip: *const InternPool) []NullTerminatedString { + return @ptrCast(ip.extra.items[slice.start..][0..slice.len]); + } + }; + pub fn toString(self: NullTerminatedString) String { - return @as(String, @enumFromInt(@intFromEnum(self))); + return @enumFromInt(@intFromEnum(self)); } pub fn toOptional(self: NullTerminatedString) OptionalNullTerminatedString { - return @as(OptionalNullTerminatedString, @enumFromInt(@intFromEnum(self))); + return @enumFromInt(@intFromEnum(self)); } const Adapter = struct { @@ -238,7 +242,8 @@ pub const Key = union(enum) { enum_type: EnumType, func_type: FuncType, error_set_type: ErrorSetType, - inferred_error_set_type: Module.InferredErrorSet.Index, + /// The payload is the function body, either a `func_decl` or `func_instance`. + inferred_error_set_type: Index, /// Typed `undefined`. This will never be `none`; untyped `undefined` is represented /// via `simple_value` and has a named `Index` tag for it. @@ -287,14 +292,14 @@ pub const Key = union(enum) { pub const ErrorSetType = struct { /// Set of error names, sorted by null terminated string index. - names: []const NullTerminatedString, + names: NullTerminatedString.Slice, /// This is ignored by `get` but will always be provided by `indexToKey`. names_map: OptionalMapIndex = .none, /// Look up field index based on field name. pub fn nameIndex(self: ErrorSetType, ip: *const InternPool, name: NullTerminatedString) ?u32 { const map = &ip.maps.items[@intFromEnum(self.names_map.unwrap().?)]; - const adapter: NullTerminatedString.Adapter = .{ .strings = self.names }; + const adapter: NullTerminatedString.Adapter = .{ .strings = self.names.get(ip) }; const field_index = map.getIndexAdapted(name, adapter) orelse return null; return @as(u32, @intCast(field_index)); } @@ -565,6 +570,9 @@ pub const Key = union(enum) { /// Index into extra array of the `zir_body_inst` corresponding to this function. 
/// Used for mutating that data. zir_body_inst_extra_index: u32, + /// Index into extra array of the resolved inferred error set for this function. + /// Used for mutating that data. + resolved_error_set_extra_index: u32, /// When a generic function is instantiated, branch_quota is inherited from the /// active Sema context. Importantly, this value is also updated when an existing /// generic function instantiation is found and called. @@ -603,13 +611,21 @@ pub const Key = union(enum) { return @ptrCast(&ip.extra.items[func.analysis_extra_index]); } + /// Returns a pointer that becomes invalid after any additions to the `InternPool`. pub fn zirBodyInst(func: *const Func, ip: *const InternPool) *Zir.Inst.Index { return @ptrCast(&ip.extra.items[func.zir_body_inst_extra_index]); } + /// Returns a pointer that becomes invalid after any additions to the `InternPool`. pub fn branchQuota(func: *const Func, ip: *const InternPool) *u32 { return &ip.extra.items[func.zir_body_inst_extra_index]; } + + /// Returns a pointer that becomes invalid after any additions to the `InternPool`. 
+ pub fn resolvedErrorSet(func: *const Func, ip: *const InternPool) *Index { + assert(func.analysis(ip).inferred_error_set); + return @ptrCast(&ip.extra.items[func.resolved_error_set_extra_index]); + } }; pub const Int = struct { @@ -750,7 +766,7 @@ pub const Key = union(enum) { }; pub fn hash32(key: Key, ip: *const InternPool) u32 { - return @as(u32, @truncate(key.hash64(ip))); + return @truncate(key.hash64(ip)); } pub fn hash64(key: Key, ip: *const InternPool) u64 { @@ -914,11 +930,7 @@ pub const Key = union(enum) { return hasher.final(); }, - .error_set_type => |error_set_type| { - var hasher = Hash.init(seed); - for (error_set_type.names) |elem| std.hash.autoHash(&hasher, elem); - return hasher.final(); - }, + .error_set_type => |x| Hash.hash(seed, std.mem.sliceAsBytes(x.names.get(ip))), .anon_struct_type => |anon_struct_type| { var hasher = Hash.init(seed); @@ -1225,7 +1237,7 @@ pub const Key = union(enum) { }, .error_set_type => |a_info| { const b_info = b.error_set_type; - return std.mem.eql(NullTerminatedString, a_info.names, b_info.names); + return std.mem.eql(NullTerminatedString, a_info.names.get(ip), b_info.names.get(ip)); }, .inferred_error_set_type => |a_info| { const b_info = b.inferred_error_set_type; @@ -1518,13 +1530,14 @@ pub const Index = enum(u32) { type_optional: DataIsIndex, type_anyframe: DataIsIndex, type_error_union: struct { data: *Key.ErrorUnionType }, + type_anyerror_union: DataIsIndex, type_error_set: struct { const @"data.names_len" = opaque {}; data: *Tag.ErrorSet, @"trailing.names.len": *@"data.names_len", trailing: struct { names: []NullTerminatedString }, }, - type_inferred_error_set: struct { data: Module.InferredErrorSet.Index }, + type_inferred_error_set: DataIsIndex, type_enum_auto: struct { const @"data.fields_len" = opaque {}; data: *EnumAuto, @@ -1916,11 +1929,14 @@ pub const Tag = enum(u8) { /// An error union type. /// data is payload to `Key.ErrorUnionType`. 
type_error_union, + /// An error union type of the form `anyerror!T`. + /// data is `Index` of payload type. + type_anyerror_union, /// An error set type. /// data is payload to `ErrorSet`. type_error_set, /// The inferred error set type of a function. - /// data is `Module.InferredErrorSet.Index`. + /// data is `Index` of a `func_decl` or `func_instance`. type_inferred_error_set, /// An enum type with auto-numbered tag values. /// The enum is exhaustive. @@ -2156,6 +2172,7 @@ pub const Tag = enum(u8) { .type_optional => unreachable, .type_anyframe => unreachable, .type_error_union => ErrorUnionType, + .type_anyerror_union => unreachable, .type_error_set => ErrorSet, .type_inferred_error_set => unreachable, .type_enum_auto => EnumAuto, @@ -2251,6 +2268,10 @@ pub const Tag = enum(u8) { ty: Index, }; + /// Trailing: + /// 0. If `analysis.inferred_error_set` is `true`, `Index` of an `error_set` which + /// is a regular error set corresponding to the finished inferred error set. + /// A `none` value marks that the inferred error set is not resolved yet. pub const FuncDecl = struct { analysis: FuncAnalysis, owner_decl: Module.Decl.Index, @@ -2263,10 +2284,10 @@ pub const Tag = enum(u8) { }; /// Trailing: - /// 0. For each parameter of generic_owner: Index - /// - comptime parameter: the comptime-known value - /// - anytype parameter: the type of the runtime-known value - /// - otherwise: `none` + /// 0. If `analysis.inferred_error_set` is `true`, `Index` of an `error_set` which + /// is a regular error set corresponding to the finished inferred error set. + /// A `none` value marks that the inferred error set is not resolved yet. + /// 1. For each parameter of generic_owner: `Index` if comptime, otherwise `none` pub const FuncInstance = struct { analysis: FuncAnalysis, // Needed by the linker for codegen. Not part of hashing or equality. @@ -2312,14 +2333,19 @@ pub const Tag = enum(u8) { }; /// State that is mutable during semantic analysis. 
This data is not used for -/// equality or hashing. +/// equality or hashing, except for `inferred_error_set` which is considered +/// to be part of the type of the function. pub const FuncAnalysis = packed struct(u32) { state: State, is_cold: bool, is_noinline: bool, calls_or_awaits_errorable_fn: bool, stack_alignment: Alignment, - _: u15 = 0, + + /// True if this function has an inferred error set. + inferred_error_set: bool, + + _: u14 = 0, pub const State = enum(u8) { /// This function has not yet undergone analysis, because we have not @@ -2710,9 +2736,6 @@ pub fn deinit(ip: *InternPool, gpa: Allocator) void { ip.unions_free_list.deinit(gpa); ip.allocated_unions.deinit(gpa); - ip.inferred_error_sets_free_list.deinit(gpa); - ip.allocated_inferred_error_sets.deinit(gpa); - ip.decls_free_list.deinit(gpa); ip.allocated_decls.deinit(gpa); @@ -2780,19 +2803,15 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { return .{ .ptr_type = ptr_info }; }, - .type_optional => .{ .opt_type = @as(Index, @enumFromInt(data)) }, - .type_anyframe => .{ .anyframe_type = @as(Index, @enumFromInt(data)) }, + .type_optional => .{ .opt_type = @enumFromInt(data) }, + .type_anyframe => .{ .anyframe_type = @enumFromInt(data) }, .type_error_union => .{ .error_union_type = ip.extraData(Key.ErrorUnionType, data) }, - .type_error_set => { - const error_set = ip.extraDataTrail(Tag.ErrorSet, data); - const names_len = error_set.data.names_len; - const names = ip.extra.items[error_set.end..][0..names_len]; - return .{ .error_set_type = .{ - .names = @ptrCast(names), - .names_map = error_set.data.names_map.toOptional(), - } }; - }, + .type_anyerror_union => .{ .error_union_type = .{ + .error_set_type = .anyerror_type, + .payload_type = @enumFromInt(data), + } }, + .type_error_set => ip.indexToKeyErrorSetType(data), .type_inferred_error_set => .{ .inferred_error_set_type = @enumFromInt(data), }, @@ -2870,7 +2889,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { }, 
.type_enum_explicit => ip.indexToKeyEnum(data, .explicit), .type_enum_nonexhaustive => ip.indexToKeyEnum(data, .nonexhaustive), - .type_function => .{ .func_type = ip.indexToKeyFuncType(data) }, + .type_function => .{ .func_type = ip.extraFuncType(data) }, .undef => .{ .undef = @as(Index, @enumFromInt(data)) }, .runtime_value => .{ .runtime_value = ip.extraData(Tag.TypeValue, data) }, @@ -3117,12 +3136,8 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { } }; }, .extern_func => .{ .extern_func = ip.extraData(Tag.ExternFunc, data) }, - .func_instance => { - @panic("TODO"); - }, - .func_decl => { - @panic("TODO"); - }, + .func_instance => .{ .func = ip.indexToKeyFuncInstance(data) }, + .func_decl => .{ .func = ip.indexToKeyFuncDecl(data) }, .only_possible_value => { const ty = @as(Index, @enumFromInt(data)); const ty_item = ip.items.get(@intFromEnum(ty)); @@ -3227,8 +3242,19 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { }; } -fn indexToKeyFuncType(ip: *const InternPool, data: u32) Key.FuncType { - const type_function = ip.extraDataTrail(Tag.TypeFunction, data); +fn indexToKeyErrorSetType(ip: *const InternPool, data: u32) Key { + const error_set = ip.extraDataTrail(Tag.ErrorSet, data); + return .{ .error_set_type = .{ + .names = .{ + .start = @intCast(error_set.end), + .len = error_set.data.names_len, + }, + .names_map = error_set.data.names_map.toOptional(), + } }; +} + +fn extraFuncType(ip: *const InternPool, extra_index: u32) Key.FuncType { + const type_function = ip.extraDataTrail(Tag.TypeFunction, extra_index); var index: usize = type_function.end; const comptime_bits: u32 = if (!type_function.data.flags.has_comptime_bits) 0 else b: { const x = ip.extra.items[index]; @@ -3256,14 +3282,22 @@ fn indexToKeyFuncType(ip: *const InternPool, data: u32) Key.FuncType { .cc_is_generic = type_function.data.flags.cc_is_generic, .section_is_generic = type_function.data.flags.section_is_generic, .addrspace_is_generic = 
type_function.data.flags.addrspace_is_generic, - .is_generic = comptime_bits != 0 or - type_function.data.flags.align_is_generic or - type_function.data.flags.cc_is_generic or - type_function.data.flags.section_is_generic or - type_function.data.flags.addrspace_is_generic, + .is_generic = type_function.data.flags.is_generic, }; } +fn indexToKeyFuncDecl(ip: *const InternPool, data: u32) Key.Func { + _ = ip; + _ = data; + @panic("TODO"); +} + +fn indexToKeyFuncInstance(ip: *const InternPool, data: u32) Key.Func { + _ = ip; + _ = data; + @panic("TODO"); +} + fn indexToKeyEnum(ip: *const InternPool, data: u32, tag_mode: Key.EnumType.TagMode) Key { const enum_explicit = ip.extraDataTrail(EnumExplicit, data); const names = @as( @@ -3301,7 +3335,7 @@ fn indexToKeyBigInt(ip: *const InternPool, limb_index: u32, positive: bool) Key pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { const adapter: KeyAdapter = .{ .intern_pool = ip }; const gop = try ip.map.getOrPutAdapted(gpa, key, adapter); - if (gop.found_existing) return @as(Index, @enumFromInt(gop.index)); + if (gop.found_existing) return @enumFromInt(gop.index); try ip.items.ensureUnusedCapacity(gpa, 1); switch (key) { .int_type => |int_type| { @@ -3392,17 +3426,20 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { }); }, .error_union_type => |error_union_type| { - ip.items.appendAssumeCapacity(.{ + ip.items.appendAssumeCapacity(if (error_union_type.error_set_type == .anyerror_type) .{ + .tag = .type_anyerror_union, + .data = @intFromEnum(error_union_type.payload_type), + } else .{ .tag = .type_error_union, .data = try ip.addExtra(gpa, error_union_type), }); }, .error_set_type => |error_set_type| { assert(error_set_type.names_map == .none); - assert(std.sort.isSorted(NullTerminatedString, error_set_type.names, {}, NullTerminatedString.indexLessThan)); + assert(std.sort.isSorted(NullTerminatedString, error_set_type.names.get(ip), {}, 
NullTerminatedString.indexLessThan)); const names_map = try ip.addMap(gpa); - try addStringsToMap(ip, gpa, names_map, error_set_type.names); - const names_len = @as(u32, @intCast(error_set_type.names.len)); + try addStringsToMap(ip, gpa, names_map, error_set_type.names.get(ip)); + const names_len = error_set_type.names.len; try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.ErrorSet).Struct.fields.len + names_len); ip.items.appendAssumeCapacity(.{ .tag = .type_error_set, @@ -3411,7 +3448,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .names_map = names_map, }), }); - ip.extra.appendSliceAssumeCapacity(@as([]const u32, @ptrCast(error_set_type.names))); + ip.extra.appendSliceAssumeCapacity(@ptrCast(error_set_type.names.get(ip))); }, .inferred_error_set_type => |ies_index| { ip.items.appendAssumeCapacity(.{ @@ -4207,7 +4244,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { ip.extra.appendSliceAssumeCapacity(@as([]const u32, @ptrCast(memoized_call.arg_values))); }, } - return @as(Index, @enumFromInt(ip.items.len - 1)); + return @enumFromInt(ip.items.len - 1); } /// This is equivalent to `Key.FuncType` but adjusted to have a slice for `param_types`. @@ -4216,13 +4253,13 @@ pub const GetFuncTypeKey = struct { return_type: Index, comptime_bits: u32, noalias_bits: u32, - alignment: Alignment, - cc: std.builtin.CallingConvention, + /// `null` means generic. + alignment: ?Alignment, + /// `null` means generic. 
+ cc: ?std.builtin.CallingConvention, is_var_args: bool, is_generic: bool, is_noinline: bool, - align_is_generic: bool, - cc_is_generic: bool, section_is_generic: bool, addrspace_is_generic: bool, }; @@ -4244,40 +4281,42 @@ pub fn getFuncType(ip: *InternPool, gpa: Allocator, key: GetFuncTypeKey) Allocat params_len); try ip.items.ensureUnusedCapacity(gpa, 1); - ip.items.appendAssumeCapacity(.{ - .tag = .type_function, - .data = ip.addExtraAssumeCapacity(Tag.TypeFunction{ - .params_len = params_len, - .return_type = key.return_type, - .flags = .{ - .alignment = key.alignment, - .cc = key.cc, - .is_var_args = key.is_var_args, - .has_comptime_bits = key.comptime_bits != 0, - .has_noalias_bits = key.noalias_bits != 0, - .is_generic = key.is_generic, - .is_noinline = key.is_noinline, - .align_is_generic = key.align_is_generic, - .cc_is_generic = key.cc_is_generic, - .section_is_generic = key.section_is_generic, - .addrspace_is_generic = key.addrspace_is_generic, - }, - }), + const func_type_extra_index = ip.addExtraAssumeCapacity(Tag.TypeFunction{ + .params_len = params_len, + .return_type = key.return_type, + .flags = .{ + .alignment = key.alignment orelse .none, + .cc = key.cc orelse .Unspecified, + .is_var_args = key.is_var_args, + .has_comptime_bits = key.comptime_bits != 0, + .has_noalias_bits = key.noalias_bits != 0, + .is_generic = key.is_generic, + .is_noinline = key.is_noinline, + .align_is_generic = key.alignment == null, + .cc_is_generic = key.cc == null, + .section_is_generic = key.section_is_generic, + .addrspace_is_generic = key.addrspace_is_generic, + }, }); + if (key.comptime_bits != 0) ip.extra.appendAssumeCapacity(key.comptime_bits); if (key.noalias_bits != 0) ip.extra.appendAssumeCapacity(key.noalias_bits); ip.extra.appendSliceAssumeCapacity(@ptrCast(key.param_types)); const adapter: KeyAdapter = .{ .intern_pool = ip }; const gop = try ip.map.getOrPutAdapted(gpa, Key{ - .func_type = indexToKeyFuncType(ip, @intCast(ip.items.len - 1)), + .func_type = 
extraFuncType(ip, func_type_extra_index), }, adapter); - if (!gop.found_existing) return @enumFromInt(ip.items.len - 1); + if (gop.found_existing) { + ip.extra.items.len = prev_extra_len; + return @enumFromInt(gop.index); + } - // An existing function type was found; undo the additions to our two arrays. - ip.items.len -= 1; - ip.extra.items.len = prev_extra_len; - return @enumFromInt(gop.index); + ip.items.appendAssumeCapacity(.{ + .tag = .type_function, + .data = func_type_extra_index, + }); + return @enumFromInt(ip.items.len - 1); } pub const GetExternFuncKey = struct { @@ -4299,19 +4338,71 @@ pub fn getExternFunc(ip: *InternPool, gpa: Allocator, key: GetExternFuncKey) All } pub const GetFuncDeclKey = struct { - fn_owner_decl: Module.Decl.Index, - param_types: []const Index, + owner_decl: Module.Decl.Index, + ty: Index, + zir_body_inst: Zir.Inst.Index, + lbrace_line: u32, + rbrace_line: u32, + lbrace_column: u32, + rbrace_column: u32, + cc: ?std.builtin.CallingConvention, + is_noinline: bool, +}; + +pub fn getFuncDecl(ip: *InternPool, gpa: Allocator, key: GetFuncDeclKey) Allocator.Error!Index { + // The strategy here is to add the function type unconditionally, then to + // ask if it already exists, and if so, revert the lengths of the mutated + // arrays. This is similar to what `getOrPutTrailingString` does. 
+ const prev_extra_len = ip.extra.items.len; + + try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.FuncDecl).Struct.fields.len); + try ip.items.ensureUnusedCapacity(gpa, 1); + + ip.items.appendAssumeCapacity(.{ + .tag = .func_decl, + .data = ip.addExtraAssumeCapacity(Tag.FuncDecl{ + .analysis = .{ + .state = if (key.cc == .Inline) .inline_only else .none, + .is_cold = false, + .is_noinline = key.is_noinline, + .calls_or_awaits_errorable_fn = false, + .stack_alignment = .none, + .inferred_error_set = false, + }, + .owner_decl = key.owner_decl, + .ty = key.ty, + .zir_body_inst = key.zir_body_inst, + .lbrace_line = key.lbrace_line, + .rbrace_line = key.rbrace_line, + .lbrace_column = key.lbrace_column, + .rbrace_column = key.rbrace_column, + }), + }); + + const adapter: KeyAdapter = .{ .intern_pool = ip }; + const gop = try ip.map.getOrPutAdapted(gpa, Key{ + .func = indexToKeyFuncDecl(ip, @intCast(ip.items.len - 1)), + }, adapter); + if (!gop.found_existing) return @enumFromInt(ip.items.len - 1); + + // An existing function type was found; undo the additions to our two arrays. + ip.items.len -= 1; + ip.extra.items.len = prev_extra_len; + return @enumFromInt(gop.index); +} + +pub const GetFuncDeclIesKey = struct { + owner_decl: Module.Decl.Index, + param_types: []Index, noalias_bits: u32, comptime_bits: u32, - return_type: Index, - inferred_error_set: bool, + bare_return_type: Index, /// null means generic. cc: ?std.builtin.CallingConvention, /// null means generic. 
alignment: ?Alignment, - section: Section, - /// null means generic - address_space: ?std.builtin.AddressSpace, + section_is_generic: bool, + addrspace_is_generic: bool, is_var_args: bool, is_generic: bool, is_noinline: bool, @@ -4320,63 +4411,258 @@ pub const GetFuncDeclKey = struct { rbrace_line: u32, lbrace_column: u32, rbrace_column: u32, - - pub const Section = union(enum) { - generic, - default, - explicit: InternPool.NullTerminatedString, - }; }; -pub fn getFuncDecl(ip: *InternPool, gpa: Allocator, key: GetFuncDeclKey) Allocator.Error!Index { - const fn_owner_decl = ip.declPtr(key.fn_owner_decl); - const decl_index = try ip.createDecl(gpa, .{ - .name = undefined, - .src_namespace = fn_owner_decl.src_namespace, - .src_node = fn_owner_decl.src_node, - .src_line = fn_owner_decl.src_line, - .has_tv = true, - .owns_tv = true, - .ty = @panic("TODO"), - .val = @panic("TODO"), - .alignment = .none, - .@"linksection" = fn_owner_decl.@"linksection", - .@"addrspace" = fn_owner_decl.@"addrspace", - .analysis = .complete, - .deletion_flag = false, - .zir_decl_index = fn_owner_decl.zir_decl_index, - .src_scope = fn_owner_decl.src_scope, - .generation = 0, - .is_pub = fn_owner_decl.is_pub, - .is_exported = fn_owner_decl.is_exported, - .has_linksection_or_addrspace = fn_owner_decl.has_linksection_or_addrspace, - .has_align = fn_owner_decl.has_align, - .alive = true, - .kind = .anon, +pub fn getFuncDeclIes(ip: *InternPool, gpa: Allocator, key: GetFuncDeclIesKey) Allocator.Error!Index { + // Validate input parameters. + assert(key.bare_return_type != .none); + for (key.param_types) |param_type| assert(param_type != .none); + + // The strategy here is to add the function decl unconditionally, then to + // ask if it already exists, and if so, revert the lengths of the mutated + // arrays. This is similar to what `getOrPutTrailingString` does. 
+ const prev_extra_len = ip.extra.items.len; + const params_len: u32 = @intCast(key.param_types.len); + + try ip.map.ensureUnusedCapacity(gpa, 4); + try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.FuncDecl).Struct.fields.len + + 1 + // inferred_error_set + @typeInfo(Tag.ErrorUnionType).Struct.fields.len + + @typeInfo(Tag.TypeFunction).Struct.fields.len + + @intFromBool(key.comptime_bits != 0) + + @intFromBool(key.noalias_bits != 0) + + params_len); + try ip.items.ensureUnusedCapacity(gpa, 4); + + ip.items.appendAssumeCapacity(.{ + .tag = .func_decl, + .data = ip.addExtraAssumeCapacity(Tag.FuncDecl{ + .analysis = .{ + .state = if (key.cc == .Inline) .inline_only else .none, + .is_cold = false, + .is_noinline = key.is_noinline, + .calls_or_awaits_errorable_fn = false, + .stack_alignment = .none, + .inferred_error_set = true, + }, + .owner_decl = key.owner_decl, + .ty = @enumFromInt(ip.items.len + 1), + .zir_body_inst = key.zir_body_inst, + .lbrace_line = key.lbrace_line, + .rbrace_line = key.rbrace_line, + .lbrace_column = key.lbrace_column, + .rbrace_column = key.rbrace_column, + }), + }); + ip.extra.appendAssumeCapacity(@intFromEnum(Index.none)); + + ip.items.appendAssumeCapacity(.{ + .tag = .type_error_union, + .data = ip.addExtraAssumeCapacity(Tag.ErrorUnionType{ + .error_set_type = @enumFromInt(ip.items.len + 1), + .payload_type = key.bare_return_type, + }), }); - // TODO better names for generic function instantiations - const decl_name = try ip.getOrPutStringFmt(gpa, "{}__anon_{d}", .{ - fn_owner_decl.name.fmt(ip), @intFromEnum(decl_index), + + ip.items.appendAssumeCapacity(.{ + .tag = .type_inferred_error_set, + .data = @intCast(ip.items.len - 2), }); - ip.declPtr(decl_index).name = decl_name; - @panic("TODO"); + + const func_type_extra_index = ip.addExtraAssumeCapacity(Tag.TypeFunction{ + .params_len = params_len, + .return_type = @enumFromInt(ip.items.len - 2), + .flags = .{ + .alignment = key.alignment orelse .none, + .cc = key.cc orelse .Unspecified, 
+ .is_var_args = key.is_var_args, + .has_comptime_bits = key.comptime_bits != 0, + .has_noalias_bits = key.noalias_bits != 0, + .is_generic = key.is_generic, + .is_noinline = key.is_noinline, + .align_is_generic = key.alignment == null, + .cc_is_generic = key.cc == null, + .section_is_generic = key.section_is_generic, + .addrspace_is_generic = key.addrspace_is_generic, + }, + }); + if (key.comptime_bits != 0) ip.extra.appendAssumeCapacity(key.comptime_bits); + if (key.noalias_bits != 0) ip.extra.appendAssumeCapacity(key.noalias_bits); + ip.extra.appendSliceAssumeCapacity(@ptrCast(key.param_types)); + + ip.items.appendAssumeCapacity(.{ + .tag = .type_function, + .data = func_type_extra_index, + }); + + const adapter: KeyAdapter = .{ .intern_pool = ip }; + const gop = ip.map.getOrPutAssumeCapacityAdapted(Key{ + .func = indexToKeyFuncDecl(ip, @intCast(ip.items.len - 4)), + }, adapter); + if (!gop.found_existing) { + assert(!ip.map.getOrPutAssumeCapacityAdapted(Key{ .error_union_type = .{ + .error_set_type = @enumFromInt(ip.items.len - 2), + .payload_type = key.bare_return_type, + } }, adapter).found_existing); + assert(!ip.map.getOrPutAssumeCapacityAdapted(Key{ + .inferred_error_set_type = @enumFromInt(ip.items.len - 4), + }, adapter).found_existing); + assert(!ip.map.getOrPutAssumeCapacityAdapted(Key{ + .func_type = extraFuncType(ip, func_type_extra_index), + }, adapter).found_existing); + return @enumFromInt(ip.items.len - 4); + } + + // An existing function type was found; undo the additions to our two arrays. 
+ ip.items.len -= 4; + ip.extra.items.len = prev_extra_len; + return @enumFromInt(gop.index); +} + +pub fn getErrorSetType( + ip: *InternPool, + gpa: Allocator, + names: []const NullTerminatedString, +) Allocator.Error!Index { + assert(std.sort.isSorted(NullTerminatedString, names, {}, NullTerminatedString.indexLessThan)); + + // The strategy here is to add the type unconditionally, then to ask if it + // already exists, and if so, revert the lengths of the mutated arrays. + // This is similar to what `getOrPutTrailingString` does. + const prev_extra_len = ip.extra.items.len; + + try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.ErrorSet).Struct.fields.len + names.len); + try ip.items.ensureUnusedCapacity(gpa, 1); + + ip.items.appendAssumeCapacity(.{ + .tag = .type_error_set, + .data = ip.addExtraAssumeCapacity(Tag.ErrorSet{ + .names_len = @intCast(names.len), + .names_map = @enumFromInt(ip.maps.items.len), + }), + }); + ip.extra.appendSliceAssumeCapacity(@ptrCast(names)); + + const adapter: KeyAdapter = .{ .intern_pool = ip }; + const key = indexToKeyErrorSetType(ip, @intCast(ip.items.len - 1)); + const gop = try ip.map.getOrPutAdapted(gpa, key, adapter); + if (!gop.found_existing) { + _ = ip.addMap(gpa) catch { + ip.items.len -= 1; + ip.extra.items.len = prev_extra_len; + }; + return @enumFromInt(ip.items.len - 1); + } + + // An existing function type was found; undo the additions to our two arrays. 
+ ip.items.len -= 1; + ip.extra.items.len = prev_extra_len; + return @enumFromInt(gop.index); } pub const GetFuncInstanceKey = struct { - param_types: []const Index, + param_types: []Index, noalias_bits: u32, - return_type: Index, + bare_return_type: Index, cc: std.builtin.CallingConvention, alignment: Alignment, is_noinline: bool, generic_owner: Index, + inferred_error_set: bool, }; -pub fn getFuncInstance(ip: *InternPool, gpa: Allocator, key: GetFuncInstanceKey) Allocator.Error!Index { +pub fn getFuncInstance(ip: *InternPool, gpa: Allocator, arg: GetFuncInstanceKey) Allocator.Error!Index { _ = ip; _ = gpa; - _ = key; + _ = arg; @panic("TODO"); + //const func_ty = try ip.getFuncType(gpa, .{ + // .param_types = arg.param_types, + // .bare_return_type = arg.bare_return_type, + // .comptime_bits = arg.comptime_bits, + // .noalias_bits = arg.noalias_bits, + // .alignment = arg.alignment, + // .cc = arg.cc, + // .is_var_args = arg.is_var_args, + // .is_generic = arg.is_generic, + // .is_noinline = arg.is_noinline, + // .section_is_generic = arg.section_is_generic, + // .addrspace_is_generic = arg.addrspace_is_generic, + // .inferred_error_set = arg.inferred_error_set, + //}); + + //const fn_owner_decl = ip.declPtr(arg.fn_owner_decl); + //const decl_index = try ip.createDecl(gpa, .{ + // .name = undefined, + // .src_namespace = fn_owner_decl.src_namespace, + // .src_node = fn_owner_decl.src_node, + // .src_line = fn_owner_decl.src_line, + // .has_tv = true, + // .owns_tv = true, + // .ty = func_ty, + // .val = undefined, + // .alignment = .none, + // .@"linksection" = fn_owner_decl.@"linksection", + // .@"addrspace" = fn_owner_decl.@"addrspace", + // .analysis = .complete, + // .deletion_flag = false, + // .zir_decl_index = fn_owner_decl.zir_decl_index, + // .src_scope = fn_owner_decl.src_scope, + // .generation = arg.generation, + // .is_pub = fn_owner_decl.is_pub, + // .is_exported = fn_owner_decl.is_exported, + // .has_linksection_or_addrspace = 
fn_owner_decl.has_linksection_or_addrspace, + // .has_align = fn_owner_decl.has_align, + // .alive = true, + // .kind = .anon, + //}); + //// TODO: improve this name + //const decl = ip.declPtr(decl_index); + //decl.name = try ip.getOrPutStringFmt(gpa, "{}__anon_{d}", .{ + // fn_owner_decl.name.fmt(ip), @intFromEnum(decl_index), + //}); + + //const gop = try ip.map.getOrPutAdapted(gpa, Key{ + // .func = .{ + // .ty = func_ty, + // .generic_owner = .none, + // .owner_decl = decl_index, + // // Only the above fields will be read for hashing/equality. + // .analysis_extra_index = undefined, + // .zir_body_inst_extra_index = undefined, + // .branch_quota_extra_index = undefined, + // .resolved_error_set_extra_index = undefined, + // .zir_body_inst = undefined, + // .lbrace_line = undefined, + // .rbrace_line = undefined, + // .lbrace_column = undefined, + // .rbrace_column = undefined, + // .comptime_args = undefined, + // }, + //}, KeyAdapter{ .intern_pool = ip }); + //if (gop.found_existing) return @enumFromInt(gop.index); + //try ip.items.append(gpa, .{ + // .tag = .func_decl, + // .data = try ip.addExtra(gpa, .{ + // .analysis = .{ + // .state = if (arg.cc == .Inline) .inline_only else .none, + // .is_cold = false, + // .is_noinline = arg.is_noinline, + // .calls_or_awaits_errorable_fn = false, + // .stack_alignment = .none, + // }, + // .owner_decl = arg.owner_decl, + // .ty = func_ty, + // .zir_body_inst = arg.zir_body_inst, + // .lbrace_line = arg.lbrace_line, + // .rbrace_line = arg.rbrace_line, + // .lbrace_column = arg.lbrace_column, + // .rbrace_column = arg.rbrace_column, + // }), + //}); + //const func_index: InternPool.Index = @enumFromInt(ip.items.len - 1); + //decl.val = func_index.toValue(); + //return func_index; } /// Provides API for completing an enum type after calling `getIncompleteEnum`. 
@@ -4576,15 +4862,15 @@ pub fn finishGetEnum( .values_map = values_map, }), }); - ip.extra.appendSliceAssumeCapacity(@as([]const u32, @ptrCast(enum_type.names))); - ip.extra.appendSliceAssumeCapacity(@as([]const u32, @ptrCast(enum_type.values))); - return @as(Index, @enumFromInt(ip.items.len - 1)); + ip.extra.appendSliceAssumeCapacity(@ptrCast(enum_type.names)); + ip.extra.appendSliceAssumeCapacity(@ptrCast(enum_type.values)); + return @enumFromInt(ip.items.len - 1); } pub fn getIfExists(ip: *const InternPool, key: Key) ?Index { const adapter: KeyAdapter = .{ .intern_pool = ip }; const index = ip.map.getIndexAdapted(key, adapter) orelse return null; - return @as(Index, @enumFromInt(index)); + return @enumFromInt(index); } pub fn getAssumeExists(ip: *const InternPool, key: Key) Index { @@ -4622,7 +4908,7 @@ fn addIndexesToMap( fn addMap(ip: *InternPool, gpa: Allocator) Allocator.Error!MapIndex { const ptr = try ip.maps.addOne(gpa); ptr.* = .{}; - return @as(MapIndex, @enumFromInt(ip.maps.items.len - 1)); + return @enumFromInt(ip.maps.items.len - 1); } /// This operation only happens under compile error conditions. 
@@ -4653,23 +4939,28 @@ fn addExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 { const result = @as(u32, @intCast(ip.extra.items.len)); inline for (@typeInfo(@TypeOf(extra)).Struct.fields) |field| { ip.extra.appendAssumeCapacity(switch (field.type) { - u32 => @field(extra, field.name), - Index => @intFromEnum(@field(extra, field.name)), - Module.Decl.Index => @intFromEnum(@field(extra, field.name)), - Module.Namespace.Index => @intFromEnum(@field(extra, field.name)), - Module.Namespace.OptionalIndex => @intFromEnum(@field(extra, field.name)), - MapIndex => @intFromEnum(@field(extra, field.name)), - OptionalMapIndex => @intFromEnum(@field(extra, field.name)), - RuntimeIndex => @intFromEnum(@field(extra, field.name)), - String => @intFromEnum(@field(extra, field.name)), - NullTerminatedString => @intFromEnum(@field(extra, field.name)), - OptionalNullTerminatedString => @intFromEnum(@field(extra, field.name)), - i32 => @as(u32, @bitCast(@field(extra, field.name))), - Tag.TypePointer.Flags => @as(u32, @bitCast(@field(extra, field.name))), - Tag.TypeFunction.Flags => @as(u32, @bitCast(@field(extra, field.name))), - Tag.TypePointer.PackedOffset => @as(u32, @bitCast(@field(extra, field.name))), - Tag.TypePointer.VectorIndex => @intFromEnum(@field(extra, field.name)), - Tag.Variable.Flags => @as(u32, @bitCast(@field(extra, field.name))), + Index, + Module.Decl.Index, + Module.Namespace.Index, + Module.Namespace.OptionalIndex, + MapIndex, + OptionalMapIndex, + RuntimeIndex, + String, + NullTerminatedString, + OptionalNullTerminatedString, + Tag.TypePointer.VectorIndex, + => @intFromEnum(@field(extra, field.name)), + + u32, + i32, + FuncAnalysis, + Tag.TypePointer.Flags, + Tag.TypeFunction.Flags, + Tag.TypePointer.PackedOffset, + Tag.Variable.Flags, + => @bitCast(@field(extra, field.name)), + else => @compileError("bad field type: " ++ @typeName(field.type)), }); } @@ -4720,8 +5011,6 @@ fn extraDataTrail(ip: *const InternPool, comptime T: type, index: usize) struct 
inline for (fields, 0..) |field, i| { const int32 = ip.extra.items[i + index]; @field(result, field.name) = switch (field.type) { - u32 => int32, - Index, Module.Decl.Index, Module.Namespace.Index, @@ -4735,6 +5024,7 @@ fn extraDataTrail(ip: *const InternPool, comptime T: type, index: usize) struct Tag.TypePointer.VectorIndex, => @enumFromInt(int32), + u32, i32, Tag.TypePointer.Flags, Tag.TypeFunction.Flags, @@ -5200,19 +5490,11 @@ pub fn indexToFuncType(ip: *const InternPool, val: Index) ?Key.FuncType { const tags = ip.items.items(.tag); const datas = ip.items.items(.data); switch (tags[@intFromEnum(val)]) { - .type_function => return indexToKeyFuncType(ip, datas[@intFromEnum(val)]), + .type_function => return extraFuncType(ip, datas[@intFromEnum(val)]), else => return null, } } -pub fn indexToInferredErrorSetType(ip: *const InternPool, val: Index) Module.InferredErrorSet.OptionalIndex { - assert(val != .none); - const tags = ip.items.items(.tag); - if (tags[@intFromEnum(val)] != .type_inferred_error_set) return .none; - const datas = ip.items.items(.data); - return @as(Module.InferredErrorSet.Index, @enumFromInt(datas[@intFromEnum(val)])).toOptional(); -} - /// includes .comptime_int_type pub fn isIntegerType(ip: *const InternPool, ty: Index) bool { return switch (ty) { @@ -5284,6 +5566,10 @@ pub fn isAggregateType(ip: *const InternPool, ty: Index) bool { }; } +pub fn errorUnionSet(ip: *const InternPool, ty: Index) Index { + return ip.indexToKey(ty).error_union_type.error_set_type; +} + /// The is only legal because the initializer is not part of the hash. 
pub fn mutateVarInit(ip: *InternPool, index: Index, init_index: Index) void { const item = ip.items.get(@intFromEnum(index)); @@ -5354,11 +5640,12 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { .type_optional => 0, .type_anyframe => 0, .type_error_union => @sizeOf(Key.ErrorUnionType), + .type_anyerror_union => 0, .type_error_set => b: { const info = ip.extraData(Tag.ErrorSet, data); break :b @sizeOf(Tag.ErrorSet) + (@sizeOf(u32) * info.names_len); }, - .type_inferred_error_set => @sizeOf(Module.InferredErrorSet), + .type_inferred_error_set => 0, .type_enum_explicit, .type_enum_nonexhaustive => @sizeOf(EnumExplicit), .type_enum_auto => @sizeOf(EnumAuto), .type_opaque => @sizeOf(Key.OpaqueType), @@ -5506,6 +5793,7 @@ fn dumpAllFallible(ip: *const InternPool) anyerror!void { .type_optional, .type_anyframe, .type_error_union, + .type_anyerror_union, .type_error_set, .type_inferred_error_set, .type_enum_explicit, @@ -5598,14 +5886,6 @@ pub fn unionPtrConst(ip: *const InternPool, index: Module.Union.Index) *const Mo return ip.allocated_unions.at(@intFromEnum(index)); } -pub fn inferredErrorSetPtr(ip: *InternPool, index: Module.InferredErrorSet.Index) *Module.InferredErrorSet { - return ip.allocated_inferred_error_sets.at(@intFromEnum(index)); -} - -pub fn inferredErrorSetPtrConst(ip: *const InternPool, index: Module.InferredErrorSet.Index) *const Module.InferredErrorSet { - return ip.allocated_inferred_error_sets.at(@intFromEnum(index)); -} - pub fn declPtr(ip: *InternPool, index: Module.Decl.Index) *Module.Decl { return ip.allocated_decls.at(@intFromEnum(index)); } @@ -5658,28 +5938,6 @@ pub fn destroyUnion(ip: *InternPool, gpa: Allocator, index: Module.Union.Index) }; } -pub fn createInferredErrorSet( - ip: *InternPool, - gpa: Allocator, - initialization: Module.InferredErrorSet, -) Allocator.Error!Module.InferredErrorSet.Index { - if (ip.inferred_error_sets_free_list.popOrNull()) |index| { - 
ip.allocated_inferred_error_sets.at(@intFromEnum(index)).* = initialization; - return index; - } - const ptr = try ip.allocated_inferred_error_sets.addOne(gpa); - ptr.* = initialization; - return @as(Module.InferredErrorSet.Index, @enumFromInt(ip.allocated_inferred_error_sets.len - 1)); -} - -pub fn destroyInferredErrorSet(ip: *InternPool, gpa: Allocator, index: Module.InferredErrorSet.Index) void { - ip.inferredErrorSetPtr(index).* = undefined; - ip.inferred_error_sets_free_list.append(gpa, index) catch { - // In order to keep `destroyInferredErrorSet` a non-fallible function, we ignore memory - // allocation failures here, instead leaking the InferredErrorSet until garbage collection. - }; -} - pub fn createDecl( ip: *InternPool, gpa: Allocator, @@ -5912,6 +6170,7 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index { .type_optional, .type_anyframe, .type_error_union, + .type_anyerror_union, .type_error_set, .type_inferred_error_set, .type_enum_auto, @@ -6236,7 +6495,10 @@ pub fn zigTypeTagOrPoison(ip: *const InternPool, index: Index) error{GenericPois .type_optional => .Optional, .type_anyframe => .AnyFrame, - .type_error_union => .ErrorUnion, + + .type_error_union, + .type_anyerror_union, + => .ErrorUnion, .type_error_set, .type_inferred_error_set, @@ -6340,6 +6602,10 @@ pub fn funcAnalysis(ip: *const InternPool, i: Index) *FuncAnalysis { return @ptrCast(&ip.extra.items[extra_index]); } +pub fn funcHasInferredErrorSet(ip: *const InternPool, i: Index) bool { + return funcAnalysis(ip, i).inferred_error_set; +} + pub fn funcZirBodyInst(ip: *const InternPool, i: Index) Zir.Inst.Index { assert(i != .none); const item = ip.items.get(@intFromEnum(i)); @@ -6356,3 +6622,43 @@ pub fn funcZirBodyInst(ip: *const InternPool, i: Index) Zir.Inst.Index { }; return ip.extra.items[extra_index]; } + +pub fn iesFuncIndex(ip: *const InternPool, ies_index: InternPool.Index) InternPool.Index { + assert(ies_index != .none); + const tags = ip.items.items(.tag); + 
assert(tags[@intFromEnum(ies_index)] == .type_inferred_error_set); + const func_index = ip.items.items(.data)[@intFromEnum(ies_index)]; + switch (tags[func_index]) { + .func_decl, .func_instance => {}, + else => unreachable, // assertion failed + } + return @enumFromInt(func_index); +} + +/// Returns a mutable pointer to the resolved error set type of an inferred +/// error set function. The returned pointer is invalidated when anything is +/// added to `ip`. +pub fn iesResolved(ip: *const InternPool, ies_index: InternPool.Index) *InternPool.Index { + assert(ies_index != .none); + const tags = ip.items.items(.tag); + const datas = ip.items.items(.data); + assert(tags[@intFromEnum(ies_index)] == .type_inferred_error_set); + const func_index = datas[@intFromEnum(ies_index)]; + return funcIesResolved(ip, func_index); +} + +/// Returns a mutable pointer to the resolved error set type of an inferred +/// error set function. The returned pointer is invalidated when anything is +/// added to `ip`. +pub fn funcIesResolved(ip: *const InternPool, func_index: InternPool.Index) *InternPool.Index { + const tags = ip.items.items(.tag); + const datas = ip.items.items(.data); + assert(funcHasInferredErrorSet(ip, func_index)); + const func_start = datas[@intFromEnum(func_index)]; + const extra_index = switch (tags[@intFromEnum(func_index)]) { + .func_decl => func_start + @typeInfo(Tag.FuncDecl).Struct.fields.len, + .func_instance => func_start + @typeInfo(Tag.FuncInstance).Struct.fields.len, + else => unreachable, + }; + return @ptrCast(&ip.extra.items[extra_index]); +} diff --git a/src/Module.zig b/src/Module.zig index 3dd924d17a..e750364b44 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -1297,98 +1297,6 @@ pub const Union = struct { } }; -/// Some extern function struct memory is owned by the Decl's TypedValue.Managed -/// arena allocator. -pub const ExternFn = struct { - /// The Decl that corresponds to the function itself. 
- owner_decl: Decl.Index, - /// Library name if specified. - /// For example `extern "c" fn write(...) usize` would have 'c' as library name. - /// Allocated with Module's allocator; outlives the ZIR code. - lib_name: ?[*:0]const u8, - - pub fn deinit(extern_fn: *ExternFn, gpa: Allocator) void { - if (extern_fn.lib_name) |lib_name| { - gpa.free(mem.sliceTo(lib_name, 0)); - } - } -}; - -/// This struct is used to keep track of any dependencies related to functions instances -/// that return inferred error sets. Note that a function may be associated to -/// multiple different error sets, for example an inferred error set which -/// this function returns, but also any inferred error sets of called inline -/// or comptime functions. -pub const InferredErrorSet = struct { - /// The function from which this error set originates. - func: InternPool.Index, - - /// All currently known errors that this error set contains. This includes - /// direct additions via `return error.Foo;`, and possibly also errors that - /// are returned from any dependent functions. When the inferred error set is - /// fully resolved, this map contains all the errors that the function might return. - errors: NameMap = .{}, - - /// Other inferred error sets which this inferred error set should include. - inferred_error_sets: std.AutoArrayHashMapUnmanaged(InferredErrorSet.Index, void) = .{}, - - /// Whether the function returned anyerror. This is true if either of - /// the dependent functions returns anyerror. - is_anyerror: bool = false, - - /// Whether this error set is already fully resolved. If true, resolving - /// can skip resolving any dependents of this inferred error set. 
- is_resolved: bool = false, - - pub const NameMap = std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void); - - pub const Index = enum(u32) { - _, - - pub fn toOptional(i: InferredErrorSet.Index) InferredErrorSet.OptionalIndex { - return @as(InferredErrorSet.OptionalIndex, @enumFromInt(@intFromEnum(i))); - } - }; - - pub const OptionalIndex = enum(u32) { - none = std.math.maxInt(u32), - _, - - pub fn init(oi: ?InferredErrorSet.Index) InferredErrorSet.OptionalIndex { - return @as(InferredErrorSet.OptionalIndex, @enumFromInt(@intFromEnum(oi orelse return .none))); - } - - pub fn unwrap(oi: InferredErrorSet.OptionalIndex) ?InferredErrorSet.Index { - if (oi == .none) return null; - return @as(InferredErrorSet.Index, @enumFromInt(@intFromEnum(oi))); - } - }; - - pub fn addErrorSet( - self: *InferredErrorSet, - err_set_ty: Type, - ip: *InternPool, - gpa: Allocator, - ) !void { - switch (err_set_ty.toIntern()) { - .anyerror_type => { - self.is_anyerror = true; - }, - else => switch (ip.indexToKey(err_set_ty.toIntern())) { - .error_set_type => |error_set_type| { - for (error_set_type.names) |name| { - try self.errors.put(gpa, name, {}); - } - }, - .inferred_error_set_type => |ies_index| { - try self.inferred_error_sets.put(gpa, ies_index, {}); - }, - else => unreachable, - }, - } - } -}; - pub const DeclAdapter = struct { mod: *Module, @@ -3220,10 +3128,6 @@ pub fn structPtr(mod: *Module, index: Struct.Index) *Struct { return mod.intern_pool.structPtr(index); } -pub fn inferredErrorSetPtr(mod: *Module, index: InferredErrorSet.Index) *InferredErrorSet { - return mod.intern_pool.inferredErrorSetPtr(index); -} - pub fn namespacePtrUnwrap(mod: *Module, index: Namespace.OptionalIndex) ?*Namespace { return mod.namespacePtr(index.unwrap() orelse return null); } @@ -4261,6 +4165,7 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void { .owner_decl_index = new_decl_index, .func_index = .none, .fn_ret_ty = Type.void, + .fn_ret_ty_ies = null, .owner_func_index = 
.none, .comptime_mutable_decls = &comptime_mutable_decls, }; @@ -4342,6 +4247,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { .owner_decl_index = decl_index, .func_index = .none, .fn_ret_ty = Type.void, + .fn_ret_ty_ies = null, .owner_func_index = .none, .comptime_mutable_decls = &comptime_mutable_decls, }; @@ -5289,12 +5195,19 @@ pub fn analyzeFnBody(mod: *Module, func_index: InternPool.Index, arena: Allocato .owner_decl_index = decl_index, .func_index = func_index, .fn_ret_ty = fn_ty_info.return_type.toType(), + .fn_ret_ty_ies = null, .owner_func_index = func_index, .branch_quota = @max(func.branchQuota(ip).*, Sema.default_branch_quota), .comptime_mutable_decls = &comptime_mutable_decls, }; defer sema.deinit(); + if (func.analysis(ip).inferred_error_set) { + const ies = try arena.create(Sema.InferredErrorSet); + ies.* = .{ .func = func_index }; + sema.fn_ret_ty_ies = ies; + } + // reset in case calls to errorable functions are removed. func.analysis(ip).calls_or_awaits_errorable_fn = false; @@ -5433,7 +5346,7 @@ pub fn analyzeFnBody(mod: *Module, func_index: InternPool.Index, arena: Allocato try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len + inner_block.instructions.items.len); const main_block_index = sema.addExtraAssumeCapacity(Air.Block{ - .body_len = @as(u32, @intCast(inner_block.instructions.items.len)), + .body_len = @intCast(inner_block.instructions.items.len), }); sema.air_extra.appendSliceAssumeCapacity(inner_block.instructions.items); sema.air_extra.items[@intFromEnum(Air.ExtraIndex.main_block)] = main_block_index; @@ -5445,7 +5358,7 @@ pub fn analyzeFnBody(mod: *Module, func_index: InternPool.Index, arena: Allocato // Crucially, this happens *after* we set the function state to success above, // so that dependencies on the function body will now be satisfied rather than // result in circular dependency errors. 
- sema.resolveFnTypes(fn_ty) catch |err| switch (err) { + sema.resolveFnTypes(&inner_block, LazySrcLoc.nodeOffset(0), fn_ty) catch |err| switch (err) { error.NeededSourceLocation => unreachable, error.GenericPoison => unreachable, error.ComptimeReturn => unreachable, @@ -6595,7 +6508,8 @@ pub fn errorUnionType(mod: *Module, error_set_ty: Type, payload_ty: Type) Alloca pub fn singleErrorSetType(mod: *Module, name: InternPool.NullTerminatedString) Allocator.Error!Type { const names: *const [1]InternPool.NullTerminatedString = &name; - return (try mod.intern_pool.get(mod.gpa, .{ .error_set_type = .{ .names = names } })).toType(); + const new_ty = try mod.intern_pool.getErrorSetType(mod.gpa, names); + return new_ty.toType(); } /// Sorts `names` in place. @@ -6609,7 +6523,7 @@ pub fn errorSetFromUnsortedNames( {}, InternPool.NullTerminatedString.indexLessThan, ); - const new_ty = try mod.intern(.{ .error_set_type = .{ .names = names } }); + const new_ty = try mod.intern_pool.getErrorSetType(mod.gpa, names); return new_ty.toType(); } @@ -6956,16 +6870,6 @@ pub fn typeToFunc(mod: *Module, ty: Type) ?InternPool.Key.FuncType { return mod.intern_pool.indexToFuncType(ty.toIntern()); } -pub fn typeToInferredErrorSet(mod: *Module, ty: Type) ?*InferredErrorSet { - const index = typeToInferredErrorSetIndex(mod, ty).unwrap() orelse return null; - return mod.inferredErrorSetPtr(index); -} - -pub fn typeToInferredErrorSetIndex(mod: *Module, ty: Type) InferredErrorSet.OptionalIndex { - if (ty.ip_index == .none) return .none; - return mod.intern_pool.indexToInferredErrorSetType(ty.toIntern()); -} - pub fn funcOwnerDeclPtr(mod: *Module, func_index: InternPool.Index) *Decl { return mod.declPtr(mod.funcOwnerDeclIndex(func_index)); } @@ -6974,6 +6878,10 @@ pub fn funcOwnerDeclIndex(mod: *Module, func_index: InternPool.Index) Decl.Index return mod.funcInfo(func_index).owner_decl; } +pub fn iesFuncIndex(mod: *const Module, ies_index: InternPool.Index) InternPool.Index { + return 
mod.intern_pool.iesFuncIndex(ies_index); +} + pub fn funcInfo(mod: *Module, func_index: InternPool.Index) InternPool.Key.Func { return mod.intern_pool.indexToKey(func_index).func; } @@ -7040,19 +6948,3 @@ pub fn getParamName(mod: *Module, func_index: InternPool.Index, index: u32) [:0] else => unreachable, }; } - -pub fn hasInferredErrorSet(mod: *Module, func: InternPool.Key.Func) bool { - const owner_decl = mod.declPtr(func.owner_decl); - const zir = owner_decl.getFileScope(mod).zir; - const zir_tags = zir.instructions.items(.tag); - switch (zir_tags[func.zir_body_inst]) { - .func => return false, - .func_inferred => return true, - .func_fancy => { - const inst_data = zir.instructions.items(.data)[func.zir_body_inst].pl_node; - const extra = zir.extraData(Zir.Inst.FuncFancy, inst_data.payload_index); - return extra.data.bits.is_inferred_error; - }, - else => unreachable, - } -} diff --git a/src/Sema.zig b/src/Sema.zig index 25cacc01ea..9cd6acfc60 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -38,6 +38,10 @@ error_return_trace_index_on_fn_entry: Air.Inst.Ref = .none, /// generic function which uses a type expression for the return type. /// The type will be `void` in the case that `func` is `null`. fn_ret_ty: Type, +/// In case of the return type being an error union with an inferred error +/// set, this is the inferred error set. `null` otherwise. Allocated with +/// `Sema.arena`. +fn_ret_ty_ies: ?*InferredErrorSet, branch_quota: u32 = default_branch_quota, branch_count: u32 = 0, /// Populated when returning `error.ComptimeBreak`. Used to communicate the @@ -128,6 +132,46 @@ const Alignment = InternPool.Alignment; pub const default_branch_quota = 1000; pub const default_reference_trace_len = 2; +pub const InferredErrorSet = struct { + /// The function body from which this error set originates. + func: InternPool.Index, + + /// All currently known errors that this error set contains. 
This includes + /// direct additions via `return error.Foo;`, and possibly also errors that + /// are returned from any dependent functions. When the inferred error set is + /// fully resolved, this map contains all the errors that the function might return. + errors: NameMap = .{}, + + /// Other inferred error sets which this inferred error set should include. + inferred_error_sets: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .{}, + + pub const NameMap = std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void); + + pub fn addErrorSet( + self: *InferredErrorSet, + err_set_ty: Type, + ip: *InternPool, + arena: Allocator, + ) !void { + switch (err_set_ty.toIntern()) { + .anyerror_type => { + ip.funcIesResolved(self.func).* = .anyerror_type; + }, + else => switch (ip.indexToKey(err_set_ty.toIntern())) { + .error_set_type => |error_set_type| { + for (error_set_type.names.get(ip)) |name| { + try self.errors.put(arena, name, {}); + } + }, + .inferred_error_set_type => { + try self.inferred_error_sets.put(arena, err_set_ty.toIntern(), {}); + }, + else => unreachable, + }, + } + } +}; + /// Stores the mapping from `Zir.Inst.Index -> Air.Inst.Ref`, which is used by sema to resolve /// instructions during analysis. /// Instead of a hash table approach, InstMap is simply a slice that is indexed into using the @@ -1120,7 +1164,7 @@ fn analyzeBodyInner( .shl_sat => try sema.zirShl(block, inst, .shl_sat), .ret_ptr => try sema.zirRetPtr(block), - .ret_type => try sema.addType(sema.fn_ret_ty), + .ret_type => Air.internedToRef(sema.fn_ret_ty.toIntern()), // Instructions that we know to *always* be noreturn based solely on their tag. 
// These functions match the return type of analyzeBody so that we can @@ -3392,7 +3436,7 @@ fn zirErrorSetDecl( const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.ErrorSetDecl, inst_data.payload_index); - var names: Module.InferredErrorSet.NameMap = .{}; + var names: InferredErrorSet.NameMap = .{}; try names.ensureUnusedCapacity(sema.arena, extra.data.fields_len); var extra_index = @as(u32, @intCast(extra.end)); @@ -6933,12 +6977,10 @@ fn analyzeCall( .return_type = owner_info.return_type, .comptime_bits = 0, .noalias_bits = owner_info.noalias_bits, - .alignment = owner_info.alignment, - .cc = owner_info.cc, + .alignment = if (owner_info.align_is_generic) null else owner_info.alignment, + .cc = if (owner_info.cc_is_generic) null else owner_info.cc, .is_var_args = owner_info.is_var_args, .is_noinline = owner_info.is_noinline, - .align_is_generic = owner_info.align_is_generic, - .cc_is_generic = owner_info.cc_is_generic, .section_is_generic = owner_info.section_is_generic, .addrspace_is_generic = owner_info.addrspace_is_generic, .is_generic = owner_info.is_generic, @@ -7001,21 +7043,25 @@ fn analyzeCall( try sema.resolveInst(fn_info.ret_ty_ref); const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = 0 }; const bare_return_type = try sema.analyzeAsType(&child_block, ret_ty_src, ret_ty_inst); - // Create a fresh inferred error set type for inline/comptime calls. 
- const fn_ret_ty = blk: { - if (mod.hasInferredErrorSet(module_fn)) { - const ies_index = try mod.intern_pool.createInferredErrorSet(gpa, .{ - .func = module_fn_index, - }); - const error_set_ty = try mod.intern(.{ .inferred_error_set_type = ies_index }); - break :blk try mod.errorUnionType(error_set_ty.toType(), bare_return_type); - } - break :blk bare_return_type; - }; - new_fn_info.return_type = fn_ret_ty.toIntern(); const parent_fn_ret_ty = sema.fn_ret_ty; - sema.fn_ret_ty = fn_ret_ty; + const parent_fn_ret_ty_ies = sema.fn_ret_ty_ies; + sema.fn_ret_ty = bare_return_type; + sema.fn_ret_ty_ies = null; defer sema.fn_ret_ty = parent_fn_ret_ty; + defer sema.fn_ret_ty_ies = parent_fn_ret_ty_ies; + + if (module_fn.analysis(ip).inferred_error_set) { + // Create a fresh inferred error set type for inline/comptime calls. + const error_set_ty = try mod.intern(.{ .inferred_error_set_type = module_fn_index }); + const ies = try sema.arena.create(InferredErrorSet); + ies.* = .{ .func = module_fn_index }; + sema.fn_ret_ty_ies = ies; + sema.fn_ret_ty = (try ip.get(gpa, .{ .error_union_type = .{ + .error_set_type = error_set_ty, + .payload_type = bare_return_type.toIntern(), + } })).toType(); + ip.funcIesResolved(module_fn_index).* = .none; + } // This `res2` is here instead of directly breaking from `res` due to a stage1 // bug generating invalid LLVM IR. @@ -7059,7 +7105,7 @@ fn analyzeCall( } if (is_comptime_call and ensure_result_used) { - try sema.ensureResultUsed(block, fn_ret_ty, call_src); + try sema.ensureResultUsed(block, sema.fn_ret_ty, call_src); } const result = result: { @@ -7089,7 +7135,7 @@ fn analyzeCall( if (should_memoize and is_comptime_call) { const result_val = try sema.resolveConstMaybeUndefVal(block, .unneeded, result, ""); - const result_interned = try result_val.intern(fn_ret_ty, mod); + const result_interned = try result_val.intern(sema.fn_ret_ty, mod); // TODO: check whether any external comptime memory was mutated by the // comptime function call. 
If so, then do not memoize the call here. @@ -7114,7 +7160,7 @@ fn analyzeCall( if (i < fn_params_len) { const opts: CoerceOpts = .{ .param_src = .{ .func_inst = func, - .param_i = @as(u32, @intCast(i)), + .param_i = @intCast(i), } }; const param_ty = func_ty_info.param_types.get(ip)[i].toType(); args[i] = sema.analyzeCallArg( @@ -7433,6 +7479,7 @@ fn instantiateGenericCall( .owner_decl_index = sema.owner_decl_index, .func_index = sema.owner_func_index, .fn_ret_ty = Type.void, + .fn_ret_ty_ies = null, .owner_func_index = .none, .comptime_args = comptime_args, .generic_owner = generic_owner, @@ -7769,6 +7816,7 @@ fn zirIntFromError(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD defer tracy.end(); const mod = sema.mod; + const ip = &mod.intern_pool; const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src = LazySrcLoc.nodeOffset(extra.node); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node }; @@ -7779,7 +7827,7 @@ fn zirIntFromError(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD if (val.isUndef(mod)) { return sema.addConstUndef(Type.err_int); } - const err_name = mod.intern_pool.indexToKey(val.toIntern()).err.name; + const err_name = ip.indexToKey(val.toIntern()).err.name; return sema.addConstant(try mod.intValue( Type.err_int, try mod.getErrorValue(err_name), @@ -7787,17 +7835,19 @@ fn zirIntFromError(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD } const op_ty = sema.typeOf(uncasted_operand); - try sema.resolveInferredErrorSetTy(block, src, op_ty); - if (!op_ty.isAnyError(mod)) { - const names = op_ty.errorSetNames(mod); - switch (names.len) { - 0 => return sema.addConstant(try mod.intValue(Type.err_int, 0)), - 1 => { - const int = @as(Module.ErrorInt, @intCast(mod.global_error_set.getIndex(names[0]).?)); - return sema.addIntUnsigned(Type.err_int, int); - }, - else => {}, - } + switch (try sema.resolveInferredErrorSetTy(block, src, op_ty.toIntern())) { + 
.anyerror_type => {}, + else => |err_set_ty_index| { + const names = ip.indexToKey(err_set_ty_index).error_set_type.names; + switch (names.len) { + 0 => return sema.addConstant(try mod.intValue(Type.err_int, 0)), + 1 => { + const int: Module.ErrorInt = @intCast(mod.global_error_set.getIndex(names.get(ip)[0]).?); + return sema.addIntUnsigned(Type.err_int, int); + }, + else => {}, + } + }, } try sema.requireRuntimeBlock(block, src, operand_src); @@ -7846,6 +7896,7 @@ fn zirMergeErrorSets(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr defer tracy.end(); const mod = sema.mod; + const ip = &mod.intern_pool; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node }; @@ -7874,23 +7925,25 @@ fn zirMergeErrorSets(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr return Air.Inst.Ref.anyerror_type; } - if (mod.typeToInferredErrorSetIndex(lhs_ty).unwrap()) |ies_index| { - try sema.resolveInferredErrorSet(block, src, ies_index); - // isAnyError might have changed from a false negative to a true positive after resolution. - if (lhs_ty.isAnyError(mod)) { - return Air.Inst.Ref.anyerror_type; + if (ip.isInferredErrorSetType(lhs_ty.toIntern())) { + switch (try sema.resolveInferredErrorSet(block, src, lhs_ty.toIntern())) { + // isAnyError might have changed from a false negative to a true + // positive after resolution. + .anyerror_type => return .anyerror_type, + else => {}, } } - if (mod.typeToInferredErrorSetIndex(rhs_ty).unwrap()) |ies_index| { - try sema.resolveInferredErrorSet(block, src, ies_index); - // isAnyError might have changed from a false negative to a true positive after resolution. 
- if (rhs_ty.isAnyError(mod)) { - return Air.Inst.Ref.anyerror_type; + if (ip.isInferredErrorSetType(rhs_ty.toIntern())) { + switch (try sema.resolveInferredErrorSet(block, src, rhs_ty.toIntern())) { + // isAnyError might have changed from a false negative to a true + // positive after resolution. + .anyerror_type => return .anyerror_type, + else => {}, } } const err_set_ty = try sema.errorSetMerge(lhs_ty, rhs_ty); - return sema.addType(err_set_ty); + return Air.internedToRef(err_set_ty.toIntern()); } fn zirEnumLiteral(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -8569,6 +8622,12 @@ fn checkCallConvSupportsVarArgs(sema: *Sema, block: *Block, src: LazySrcLoc, cc: } } +const Section = union(enum) { + generic, + default, + explicit: InternPool.NullTerminatedString, +}; + fn funcCommon( sema: *Sema, block: *Block, @@ -8578,7 +8637,7 @@ fn funcCommon( alignment: ?Alignment, /// null means generic poison address_space: ?std.builtin.AddressSpace, - section: InternPool.GetFuncDeclKey.Section, + section: Section, /// null means generic poison cc: ?std.builtin.CallingConvention, /// this might be Type.generic_poison @@ -8709,6 +8768,36 @@ fn funcCommon( const param_types = block.params.items(.ty); const opt_func_index: InternPool.Index = i: { + if (!is_source_decl) { + assert(has_body); + assert(!is_generic); + assert(comptime_bits == 0); + assert(cc != null); + assert(section != .generic); + assert(address_space != null); + assert(!var_args); + break :i try ip.getFuncInstance(gpa, .{ + .param_types = param_types, + .noalias_bits = noalias_bits, + .bare_return_type = bare_return_type.toIntern(), + .cc = cc_resolved, + .alignment = alignment.?, + .is_noinline = is_noinline, + .inferred_error_set = inferred_error_set, + .generic_owner = sema.generic_owner, + }); + } + + // extern_func and func_decl functions take ownership of `sema.owner_decl`. 
+ + sema.owner_decl.@"linksection" = switch (section) { + .generic => .none, + .default => .none, + .explicit => |section_name| section_name.toOptional(), + }; + sema.owner_decl.alignment = alignment orelse .none; + sema.owner_decl.@"addrspace" = address_space orelse .generic; + if (is_extern) { assert(comptime_bits == 0); assert(cc != null); @@ -8734,26 +8823,19 @@ fn funcCommon( if (!has_body) break :i .none; - if (is_source_decl) { - if (inferred_error_set) - try sema.validateErrorUnionPayloadType(block, bare_return_type, ret_ty_src); - - const fn_owner_decl = if (sema.generic_owner != .none) - mod.funcOwnerDeclIndex(sema.generic_owner) - else - sema.owner_decl_index; + if (inferred_error_set) { + try sema.validateErrorUnionPayloadType(block, bare_return_type, ret_ty_src); + break :i try ip.getFuncDeclIes(gpa, .{ + .owner_decl = sema.owner_decl_index, - break :i try ip.getFuncDecl(gpa, .{ - .fn_owner_decl = fn_owner_decl, .param_types = param_types, .noalias_bits = noalias_bits, .comptime_bits = comptime_bits, - .return_type = bare_return_type.toIntern(), - .inferred_error_set = inferred_error_set, + .bare_return_type = bare_return_type.toIntern(), .cc = cc, .alignment = alignment, - .section = section, - .address_space = address_space, + .section_is_generic = section == .generic, + .addrspace_is_generic = address_space == null, .is_var_args = var_args, .is_generic = final_is_generic, .is_noinline = is_noinline, @@ -8766,22 +8848,30 @@ fn funcCommon( }); } - assert(!is_generic); - assert(comptime_bits == 0); - assert(cc != null); - assert(section != .generic); - assert(address_space != null); - assert(!var_args); - - break :i try ip.getFuncInstance(gpa, .{ + const func_ty = try ip.getFuncType(gpa, .{ .param_types = param_types, .noalias_bits = noalias_bits, + .comptime_bits = comptime_bits, .return_type = bare_return_type.toIntern(), - .cc = cc_resolved, - .alignment = alignment.?, + .cc = cc, + .alignment = alignment, + .section_is_generic = section == 
.generic, + .addrspace_is_generic = address_space == null, + .is_var_args = var_args, + .is_generic = final_is_generic, .is_noinline = is_noinline, + }); - .generic_owner = sema.generic_owner, + break :i try ip.getFuncDecl(gpa, .{ + .owner_decl = sema.owner_decl_index, + .ty = func_ty, + .cc = cc, + .is_noinline = is_noinline, + .zir_body_inst = func_inst, + .lbrace_line = src_locs.lbrace_line, + .rbrace_line = src_locs.rbrace_line, + .lbrace_column = @as(u16, @truncate(src_locs.columns)), + .rbrace_column = @as(u16, @truncate(src_locs.columns >> 16)), }); }; @@ -8913,10 +9003,8 @@ fn funcCommon( .noalias_bits = noalias_bits, .comptime_bits = comptime_bits, .return_type = return_type.toIntern(), - .cc = cc_resolved, - .cc_is_generic = cc == null, - .alignment = alignment orelse .none, - .align_is_generic = alignment == null, + .cc = cc, + .alignment = alignment, .section_is_generic = section == .generic, .addrspace_is_generic = address_space == null, .is_var_args = var_args, @@ -10254,7 +10342,7 @@ const SwitchProngAnalysis = struct { return sema.bitCast(block, item_ty, spa.operand, operand_src, null); } - var names: Module.InferredErrorSet.NameMap = .{}; + var names: InferredErrorSet.NameMap = .{}; try names.ensureUnusedCapacity(sema.arena, case_vals.len); for (case_vals) |err| { const err_val = sema.resolveConstValue(block, .unneeded, err, "") catch unreachable; @@ -10622,97 +10710,100 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r } } - try sema.resolveInferredErrorSetTy(block, src, operand_ty); - - if (operand_ty.isAnyError(mod)) { - if (special_prong != .@"else") { - return sema.fail( - block, - src, - "else prong required when switching on type 'anyerror'", - .{}, - ); - } - else_error_ty = Type.anyerror; - } else else_validation: { - var maybe_msg: ?*Module.ErrorMsg = null; - errdefer if (maybe_msg) |msg| msg.destroy(sema.gpa); + switch (try sema.resolveInferredErrorSetTy(block, src, operand_ty.toIntern())) { + 
.anyerror_type => { + if (special_prong != .@"else") { + return sema.fail( + block, + src, + "else prong required when switching on type 'anyerror'", + .{}, + ); + } + else_error_ty = Type.anyerror; + }, + else => |err_set_ty_index| else_validation: { + const error_names = ip.indexToKey(err_set_ty_index).error_set_type.names; + var maybe_msg: ?*Module.ErrorMsg = null; + errdefer if (maybe_msg) |msg| msg.destroy(sema.gpa); + + for (error_names.get(ip)) |error_name| { + if (!seen_errors.contains(error_name) and special_prong != .@"else") { + const msg = maybe_msg orelse blk: { + maybe_msg = try sema.errMsg( + block, + src, + "switch must handle all possibilities", + .{}, + ); + break :blk maybe_msg.?; + }; - for (operand_ty.errorSetNames(mod)) |error_name| { - if (!seen_errors.contains(error_name) and special_prong != .@"else") { - const msg = maybe_msg orelse blk: { - maybe_msg = try sema.errMsg( + try sema.errNote( block, src, - "switch must handle all possibilities", - .{}, + msg, + "unhandled error value: 'error.{}'", + .{error_name.fmt(ip)}, ); - break :blk maybe_msg.?; - }; - - try sema.errNote( - block, - src, - msg, - "unhandled error value: 'error.{}'", - .{error_name.fmt(ip)}, - ); + } } - } - if (maybe_msg) |msg| { - maybe_msg = null; - try sema.addDeclaredHereNote(msg, operand_ty); - return sema.failWithOwnedErrorMsg(msg); - } + if (maybe_msg) |msg| { + maybe_msg = null; + try sema.addDeclaredHereNote(msg, operand_ty); + return sema.failWithOwnedErrorMsg(msg); + } - if (special_prong == .@"else" and seen_errors.count() == operand_ty.errorSetNames(mod).len) { - // In order to enable common patterns for generic code allow simple else bodies - // else => unreachable, - // else => return, - // else => |e| return e, - // even if all the possible errors were already handled. 
- const tags = sema.code.instructions.items(.tag); - for (special.body) |else_inst| switch (tags[else_inst]) { - .dbg_block_begin, - .dbg_block_end, - .dbg_stmt, - .dbg_var_val, - .ret_type, - .as_node, - .ret_node, - .@"unreachable", - .@"defer", - .defer_err_code, - .err_union_code, - .ret_err_value_code, - .restore_err_ret_index, - .is_non_err, - .ret_is_non_err, - .condbr, - => {}, - else => break, - } else break :else_validation; + if (special_prong == .@"else" and + seen_errors.count() == error_names.len) + { + // In order to enable common patterns for generic code allow simple else bodies + // else => unreachable, + // else => return, + // else => |e| return e, + // even if all the possible errors were already handled. + const tags = sema.code.instructions.items(.tag); + for (special.body) |else_inst| switch (tags[else_inst]) { + .dbg_block_begin, + .dbg_block_end, + .dbg_stmt, + .dbg_var_val, + .ret_type, + .as_node, + .ret_node, + .@"unreachable", + .@"defer", + .defer_err_code, + .err_union_code, + .ret_err_value_code, + .restore_err_ret_index, + .is_non_err, + .ret_is_non_err, + .condbr, + => {}, + else => break, + } else break :else_validation; - return sema.fail( - block, - special_prong_src, - "unreachable else prong; all cases already handled", - .{}, - ); - } + return sema.fail( + block, + special_prong_src, + "unreachable else prong; all cases already handled", + .{}, + ); + } - const error_names = operand_ty.errorSetNames(mod); - var names: Module.InferredErrorSet.NameMap = .{}; - try names.ensureUnusedCapacity(sema.arena, error_names.len); - for (error_names) |error_name| { - if (seen_errors.contains(error_name)) continue; + var names: InferredErrorSet.NameMap = .{}; + try names.ensureUnusedCapacity(sema.arena, error_names.len); + for (error_names.get(ip)) |error_name| { + if (seen_errors.contains(error_name)) continue; - names.putAssumeCapacityNoClobber(error_name, {}); - } - // No need to keep the hash map metadata correct; here we - // extract 
the (sorted) keys only. - else_error_ty = try mod.errorSetFromUnsortedNames(names.keys()); + names.putAssumeCapacityNoClobber(error_name, {}); + } + // No need to keep the hash map metadata correct; here we + // extract the (sorted) keys only. + else_error_ty = try mod.errorSetFromUnsortedNames(names.keys()); + }, } }, .Int, .ComptimeInt => { @@ -16444,50 +16535,51 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai try sema.queueFullTypeResolution(error_field_ty); - // If the error set is inferred it must be resolved at this point - try sema.resolveInferredErrorSetTy(block, src, ty); - // Build our list of Error values // Optional value is only null if anyerror // Value can be zero-length slice otherwise - const error_field_vals = if (ty.isAnyError(mod)) null else blk: { - const vals = try sema.arena.alloc(InternPool.Index, ty.errorSetNames(mod).len); - for (vals, 0..) |*field_val, i| { - // TODO: write something like getCoercedInts to avoid needing to dupe - const name = try sema.arena.dupe(u8, ip.stringToSlice(ty.errorSetNames(mod)[i])); - const name_val = v: { - var anon_decl = try block.startAnonDecl(); - defer anon_decl.deinit(); - const new_decl_ty = try mod.arrayType(.{ - .len = name.len, - .child = .u8_type, - }); - const new_decl = try anon_decl.finish( - new_decl_ty, - (try mod.intern(.{ .aggregate = .{ - .ty = new_decl_ty.toIntern(), - .storage = .{ .bytes = name }, - } })).toValue(), - .none, // default alignment - ); - break :v try mod.intern(.{ .ptr = .{ - .ty = .slice_const_u8_type, - .addr = .{ .decl = new_decl }, - .len = (try mod.intValue(Type.usize, name.len)).toIntern(), - } }); - }; + const error_field_vals = switch (try sema.resolveInferredErrorSetTy(block, src, ty.toIntern())) { + .anyerror_type => null, + else => |err_set_ty_index| blk: { + const names = ip.indexToKey(err_set_ty_index).error_set_type.names; + const vals = try sema.arena.alloc(InternPool.Index, names.len); + for (vals, 0..) 
|*field_val, i| { + // TODO: write something like getCoercedInts to avoid needing to dupe + const name = try sema.arena.dupe(u8, ip.stringToSlice(names.get(ip)[i])); + const name_val = v: { + var anon_decl = try block.startAnonDecl(); + defer anon_decl.deinit(); + const new_decl_ty = try mod.arrayType(.{ + .len = name.len, + .child = .u8_type, + }); + const new_decl = try anon_decl.finish( + new_decl_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = new_decl_ty.toIntern(), + .storage = .{ .bytes = name }, + } })).toValue(), + .none, // default alignment + ); + break :v try mod.intern(.{ .ptr = .{ + .ty = .slice_const_u8_type, + .addr = .{ .decl = new_decl }, + .len = (try mod.intValue(Type.usize, name.len)).toIntern(), + } }); + }; - const error_field_fields = .{ - // name: []const u8, - name_val, - }; - field_val.* = try mod.intern(.{ .aggregate = .{ - .ty = error_field_ty.toIntern(), - .storage = .{ .elems = &error_field_fields }, - } }); - } + const error_field_fields = .{ + // name: []const u8, + name_val, + }; + field_val.* = try mod.intern(.{ .aggregate = .{ + .ty = error_field_ty.toIntern(), + .storage = .{ .elems = &error_field_fields }, + } }); + } - break :blk vals; + break :blk vals; + }, }; // Build our ?[]const Error value @@ -18055,7 +18147,9 @@ fn addToInferredErrorSet(sema: *Sema, uncasted_operand: Air.Inst.Ref) !void { const ip = &mod.intern_pool; assert(sema.fn_ret_ty.zigTypeTag(mod) == .ErrorUnion); - if (mod.typeToInferredErrorSet(sema.fn_ret_ty.errorUnionSet(mod))) |ies| { + if (ip.isInferredErrorSetType(sema.fn_ret_ty.errorUnionSet(mod).toIntern())) { + const ies = sema.fn_ret_ty_ies.?; + assert(ies.func == sema.func_index); const op_ty = sema.typeOf(uncasted_operand); switch (op_ty.zigTypeTag(mod)) { .ErrorSet => try ies.addErrorSet(op_ty, ip, gpa), @@ -19508,7 +19602,7 @@ fn zirReify( return sema.addType(Type.anyerror); const len = try sema.usizeCast(block, src, payload_val.sliceLen(mod)); - var names: Module.InferredErrorSet.NameMap = .{}; + 
var names: InferredErrorSet.NameMap = .{}; try names.ensureUnusedCapacity(sema.arena, len); for (0..len) |i| { const elem_val = try payload_val.elemValue(mod, i); @@ -20019,8 +20113,6 @@ fn zirReify( .is_var_args = is_var_args, .is_generic = false, .is_noinline = false, - .align_is_generic = false, - .cc_is_generic = false, .section_is_generic = false, .addrspace_is_generic = false, }); @@ -20524,8 +20616,8 @@ fn zirErrSetCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat break :disjoint true; } - try sema.resolveInferredErrorSetTy(block, src, dest_ty); - try sema.resolveInferredErrorSetTy(block, operand_src, operand_ty); + _ = try sema.resolveInferredErrorSetTy(block, src, dest_ty.toIntern()); + _ = try sema.resolveInferredErrorSetTy(block, operand_src, operand_ty.toIntern()); for (dest_ty.errorSetNames(mod)) |dest_err_name| { if (Type.errorSetHasFieldIp(ip, operand_ty.toIntern(), dest_err_name)) break :disjoint false; @@ -23505,7 +23597,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A break :blk mod.toEnum(std.builtin.AddressSpace, addrspace_tv.val); } else target_util.defaultAddressSpace(target, .function); - const section: InternPool.GetFuncDeclKey.Section = if (extra.data.bits.has_section_body) blk: { + const section: Section = if (extra.data.bits.has_section_body) blk: { const body_len = sema.code.extra[extra_index]; extra_index += 1; const body = sema.code.extra[extra_index..][0..body_len]; @@ -27750,42 +27842,22 @@ fn coerceInMemoryAllowedErrorSets( return .ok; } - if (mod.typeToInferredErrorSetIndex(dest_ty).unwrap()) |dst_ies_index| { - const dst_ies = mod.inferredErrorSetPtr(dst_ies_index); - // We will make an effort to return `ok` without resolving either error set, to - // avoid unnecessary "unable to resolve error set" dependency loop errors. 
- switch (src_ty.toIntern()) { - .anyerror_type => {}, - else => switch (ip.indexToKey(src_ty.toIntern())) { - .inferred_error_set_type => |src_index| { - // If both are inferred error sets of functions, and - // the dest includes the source function, the coercion is OK. - // This check is important because it works without forcing a full resolution - // of inferred error sets. - if (dst_ies.inferred_error_sets.contains(src_index)) { - return .ok; - } - }, - .error_set_type => |error_set_type| { - for (error_set_type.names) |name| { - if (!dst_ies.errors.contains(name)) break; - } else return .ok; - }, - else => unreachable, - }, - } - - if (dst_ies.func == sema.owner_func_index) { - // We are trying to coerce an error set to the current function's - // inferred error set. - try dst_ies.addErrorSet(src_ty, ip, gpa); - return .ok; + if (ip.isInferredErrorSetType(dest_ty.toIntern())) { + const dst_ies_func_index = ip.iesFuncIndex(dest_ty.toIntern()); + if (sema.fn_ret_ty_ies) |dst_ies| { + if (dst_ies.func == dst_ies_func_index) { + // We are trying to coerce an error set to the current function's + // inferred error set. + try dst_ies.addErrorSet(src_ty, ip, gpa); + return .ok; + } } - try sema.resolveInferredErrorSet(block, dest_src, dst_ies_index); - // isAnyError might have changed from a false negative to a true positive after resolution. - if (dest_ty.isAnyError(mod)) { - return .ok; + switch (try sema.resolveInferredErrorSet(block, dest_src, dest_ty.toIntern())) { + // isAnyError might have changed from a false negative to a true + // positive after resolution. 
+ .anyerror_type => return .ok, + else => {}, } } @@ -27800,17 +27872,15 @@ fn coerceInMemoryAllowedErrorSets( }, else => switch (ip.indexToKey(src_ty.toIntern())) { - .inferred_error_set_type => |src_index| { - const src_data = mod.inferredErrorSetPtr(src_index); - - try sema.resolveInferredErrorSet(block, src_src, src_index); + .inferred_error_set_type => { + const resolved_src_ty = try sema.resolveInferredErrorSet(block, src_src, src_ty.toIntern()); // src anyerror status might have changed after the resolution. - if (src_ty.isAnyError(mod)) { + if (resolved_src_ty == .anyerror_type) { // dest_ty.isAnyError(mod) == true is already checked for at this point. return .from_anyerror; } - for (src_data.errors.keys()) |key| { + for (ip.indexToKey(resolved_src_ty).error_set_type.names.get(ip)) |key| { if (!Type.errorSetHasFieldIp(ip, dest_ty.toIntern(), key)) { try missing_error_buf.append(key); } @@ -27825,7 +27895,7 @@ fn coerceInMemoryAllowedErrorSets( return .ok; }, .error_set_type => |error_set_type| { - for (error_set_type.names) |name| { + for (error_set_type.names.get(ip)) |name| { if (!Type.errorSetHasFieldIp(ip, dest_ty.toIntern(), name)) { try missing_error_buf.append(name); } @@ -30341,73 +30411,72 @@ fn analyzeIsNonErrComptimeOnly( operand: Air.Inst.Ref, ) CompileError!Air.Inst.Ref { const mod = sema.mod; + const ip = &mod.intern_pool; const operand_ty = sema.typeOf(operand); const ot = operand_ty.zigTypeTag(mod); - if (ot != .ErrorSet and ot != .ErrorUnion) return Air.Inst.Ref.bool_true; - if (ot == .ErrorSet) return Air.Inst.Ref.bool_false; + if (ot != .ErrorSet and ot != .ErrorUnion) return .bool_true; + if (ot == .ErrorSet) return .bool_false; assert(ot == .ErrorUnion); const payload_ty = operand_ty.errorUnionPayload(mod); if (payload_ty.zigTypeTag(mod) == .NoReturn) { - return Air.Inst.Ref.bool_false; + return .bool_false; } if (Air.refToIndex(operand)) |operand_inst| { switch (sema.air_instructions.items(.tag)[operand_inst]) { - 
.wrap_errunion_payload => return Air.Inst.Ref.bool_true, - .wrap_errunion_err => return Air.Inst.Ref.bool_false, + .wrap_errunion_payload => return .bool_true, + .wrap_errunion_err => return .bool_false, else => {}, } } else if (operand == .undef) { return sema.addConstUndef(Type.bool); } else if (@intFromEnum(operand) < InternPool.static_len) { // None of the ref tags can be errors. - return Air.Inst.Ref.bool_true; + return .bool_true; } const maybe_operand_val = try sema.resolveMaybeUndefVal(operand); // exception if the error union error set is known to be empty, // we allow the comparison but always make it comptime-known. - const set_ty = operand_ty.errorUnionSet(mod); - switch (set_ty.toIntern()) { + const set_ty = ip.errorUnionSet(operand_ty.toIntern()); + switch (set_ty) { .anyerror_type => {}, - else => switch (mod.intern_pool.indexToKey(set_ty.toIntern())) { + else => switch (ip.indexToKey(set_ty)) { .error_set_type => |error_set_type| { - if (error_set_type.names.len == 0) return Air.Inst.Ref.bool_true; + if (error_set_type.names.len == 0) return .bool_true; }, - .inferred_error_set_type => |ies_index| blk: { + .inferred_error_set_type => |func_index| blk: { // If the error set is empty, we must return a comptime true or false. // However we want to avoid unnecessarily resolving an inferred error set // in case it is already non-empty. - const ies = mod.inferredErrorSetPtr(ies_index); - if (ies.is_anyerror) break :blk; - if (ies.errors.count() != 0) break :blk; + switch (ip.funcIesResolved(func_index).*) { + .anyerror_type => break :blk, + .none => {}, + else => |i| if (ip.indexToKey(i).error_set_type.names.len != 0) break :blk, + } if (maybe_operand_val == null) { - // Try to avoid resolving inferred error set if possible. 
- if (ies.errors.count() != 0) break :blk; - if (ies.is_anyerror) break :blk; - for (ies.inferred_error_sets.keys()) |other_ies_index| { - if (ies_index == other_ies_index) continue; - try sema.resolveInferredErrorSet(block, src, other_ies_index); - const other_ies = mod.inferredErrorSetPtr(other_ies_index); - if (other_ies.is_anyerror) { - ies.is_anyerror = true; - ies.is_resolved = true; - break :blk; + if (sema.fn_ret_ty_ies) |ies| if (ies.func == func_index) { + // Try to avoid resolving inferred error set if possible. + for (ies.inferred_error_sets.keys()) |other_ies_index| { + if (set_ty == other_ies_index) continue; + const other_resolved = + try sema.resolveInferredErrorSet(block, src, other_ies_index); + if (other_resolved == .anyerror_type) { + ip.funcIesResolved(func_index).* = .anyerror_type; + break :blk; + } + if (ip.indexToKey(other_resolved).error_set_type.names.len != 0) + break :blk; } - - if (other_ies.errors.count() != 0) break :blk; - } - if (ies.func == sema.owner_func_index) { - // We're checking the inferred errorset of the current function and none of - // its child inferred error sets contained any errors meaning that any value - // so far with this type can't contain errors either. 
- return Air.Inst.Ref.bool_true; - } - try sema.resolveInferredErrorSet(block, src, ies_index); - if (ies.is_anyerror) break :blk; - if (ies.errors.count() == 0) return Air.Inst.Ref.bool_true; + return .bool_true; + }; + const resolved_ty = try sema.resolveInferredErrorSet(block, src, set_ty); + if (resolved_ty == .anyerror_type) + break :blk; + if (ip.indexToKey(resolved_ty).error_set_type.names.len == 0) + return .bool_true; } }, else => unreachable, @@ -30419,12 +30488,12 @@ fn analyzeIsNonErrComptimeOnly( return sema.addConstUndef(Type.bool); } if (err_union.getErrorName(mod) == .none) { - return Air.Inst.Ref.bool_true; + return .bool_true; } else { - return Air.Inst.Ref.bool_false; + return .bool_false; } } - return Air.Inst.Ref.none; + return .none; } fn analyzeIsNonErr( @@ -31365,16 +31434,19 @@ fn wrapErrorUnionSet( if (error_set_type.nameIndex(ip, expected_name) != null) break :ok; return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty); }, - .inferred_error_set_type => |ies_index| ok: { - const ies = mod.inferredErrorSetPtr(ies_index); - const expected_name = mod.intern_pool.indexToKey(val.toIntern()).err.name; - + .inferred_error_set_type => |func_index| ok: { // We carefully do this in an order that avoids unnecessarily // resolving the destination error set type. 
- if (ies.is_anyerror) break :ok; - - if (ies.errors.contains(expected_name)) break :ok; - if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, dest_err_set_ty, inst_ty, inst_src, inst_src)) break :ok; + const expected_name = mod.intern_pool.indexToKey(val.toIntern()).err.name; + switch (ip.funcIesResolved(func_index).*) { + .anyerror_type => break :ok, + .none => if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, dest_err_set_ty, inst_ty, inst_src, inst_src)) { + break :ok; + }, + else => |i| if (ip.indexToKey(i).error_set_type.nameIndex(ip, expected_name) != null) { + break :ok; + }, + } return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty); }, @@ -32862,10 +32934,13 @@ fn typeIsArrayLike(sema: *Sema, ty: Type) ?ArrayLike { }; } -pub fn resolveFnTypes(sema: *Sema, fn_ty: Type) CompileError!void { +pub fn resolveFnTypes(sema: *Sema, block: *Block, src: LazySrcLoc, fn_ty: Type) CompileError!void { const mod = sema.mod; const ip = &mod.intern_pool; const fn_ty_info = mod.typeToFunc(fn_ty).?; + + if (sema.fn_ret_ty_ies) |ies| try sema.resolveInferredErrorSetPtr(block, src, ies); + try sema.resolveTypeFully(fn_ty_info.return_type.toType()); if (mod.comp.bin_file.options.error_return_tracing and fn_ty_info.return_type.toType().isError(mod)) { @@ -33173,6 +33248,7 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi .owner_decl_index = decl_index, .func_index = .none, .fn_ret_ty = Type.void, + .fn_ret_ty_ies = null, .owner_func_index = .none, .comptime_mutable_decls = &comptime_mutable_decls, }; @@ -33223,6 +33299,7 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi .owner_decl_index = decl_index, .func_index = .none, .fn_ret_ty = Type.void, + .fn_ret_ty_ies = null, .owner_func_index = .none, .comptime_mutable_decls = undefined, }; @@ -33797,30 +33874,31 @@ fn resolveTypeFieldsUnion(sema: *Sema, ty: Type, union_obj: *Module.Union) Compi union_obj.status = 
.have_field_types; } +/// Returns a normal error set corresponding to the fully populated inferred +/// error set. fn resolveInferredErrorSet( sema: *Sema, block: *Block, src: LazySrcLoc, - ies_index: Module.InferredErrorSet.Index, -) CompileError!void { + ies_index: InternPool.Index, +) CompileError!InternPool.Index { const mod = sema.mod; const ip = &mod.intern_pool; - const ies = mod.inferredErrorSetPtr(ies_index); - - if (ies.is_resolved) return; - - const func = mod.funcInfo(ies.func); - if (func.analysis(ip).state == .in_progress) { + const func_index = ip.iesFuncIndex(ies_index); + const func = mod.funcInfo(func_index); + const resolved_ty = func.resolvedErrorSet(ip).*; + if (resolved_ty != .none) return resolved_ty; + if (func.analysis(ip).state == .in_progress) return sema.fail(block, src, "unable to resolve inferred error set", .{}); - } - // In order to ensure that all dependencies are properly added to the set, we - // need to ensure the function body is analyzed of the inferred error set. - // However, in the case of comptime/inline function calls with inferred error sets, - // each call gets a new InferredErrorSet object, which contains the same - // `InternPool.Index`. Not only is the function not relevant to the inferred error set - // in this case, it may be a generic function which would cause an assertion failure - // if we called `ensureFuncBodyAnalyzed` on it here. + // In order to ensure that all dependencies are properly added to the set, + // we need to ensure the function body is analyzed of the inferred error + // set. However, in the case of comptime/inline function calls with + // inferred error sets, each call gets a new InferredErrorSet object, which + // contains the `InternPool.Index` of the callee. Not only is the function + // not relevant to the inferred error set in this case, it may be a generic + // function which would cause an assertion failure if we called + // `ensureFuncBodyAnalyzed` on it here. 
const ies_func_owner_decl = mod.declPtr(func.owner_decl); const ies_func_info = mod.typeToFunc(ies_func_owner_decl.ty).?; // if ies declared by a inline function with generic return type, the return_type should be generic_poison, @@ -33828,7 +33906,7 @@ fn resolveInferredErrorSet( // so here we can simply skip this case. if (ies_func_info.return_type == .generic_poison_type) { assert(ies_func_info.cc == .Inline); - } else if (mod.typeToInferredErrorSet(ies_func_info.return_type.toType().errorUnionSet(mod)).? == ies) { + } else if (ip.errorUnionSet(ies_func_info.return_type) == ies_index) { if (ies_func_info.is_generic) { const msg = msg: { const msg = try sema.errMsg(block, src, "unable to resolve inferred error set of generic function", .{}); @@ -33841,33 +33919,62 @@ fn resolveInferredErrorSet( } // In this case we are dealing with the actual InferredErrorSet object that // corresponds to the function, not one created to track an inline/comptime call. - try sema.ensureFuncBodyAnalyzed(ies.func); + try sema.ensureFuncBodyAnalyzed(func_index); } - ies.is_resolved = true; + // This will now have been resolved by the logic at the end of `Module.analyzeFnBody` + // which calls `resolveInferredErrorSetPtr`. 
+ const final_resolved_ty = func.resolvedErrorSet(ip).*; + assert(final_resolved_ty != .none); + return final_resolved_ty; +} + +fn resolveInferredErrorSetPtr( + sema: *Sema, + block: *Block, + src: LazySrcLoc, + ies: *InferredErrorSet, +) CompileError!void { + const mod = sema.mod; + const ip = &mod.intern_pool; + + const func = mod.funcInfo(ies.func); + if (func.resolvedErrorSet(ip).* != .none) return; + + const ies_index = ip.errorUnionSet(sema.fn_ret_ty.toIntern()); for (ies.inferred_error_sets.keys()) |other_ies_index| { if (ies_index == other_ies_index) continue; - try sema.resolveInferredErrorSet(block, src, other_ies_index); - - const other_ies = mod.inferredErrorSetPtr(other_ies_index); - for (other_ies.errors.keys()) |key| { - try ies.errors.put(sema.gpa, key, {}); + switch (try sema.resolveInferredErrorSet(block, src, other_ies_index)) { + .anyerror_type => { + func.resolvedErrorSet(ip).* = .anyerror_type; + return; + }, + else => |error_set_ty_index| { + const names = ip.indexToKey(error_set_ty_index).error_set_type.names; + for (names.get(ip)) |name| { + try ies.errors.put(sema.arena, name, {}); + } + }, } - if (other_ies.is_anyerror) - ies.is_anyerror = true; } + + const resolved_error_set_ty = try mod.errorSetFromUnsortedNames(ies.errors.keys()); + func.resolvedErrorSet(ip).* = resolved_error_set_ty.toIntern(); } fn resolveInferredErrorSetTy( sema: *Sema, block: *Block, src: LazySrcLoc, - ty: Type, -) CompileError!void { + ty: InternPool.Index, +) CompileError!InternPool.Index { const mod = sema.mod; - if (mod.typeToInferredErrorSetIndex(ty).unwrap()) |ies_index| { - try sema.resolveInferredErrorSet(block, src, ies_index); + const ip = &mod.intern_pool; + switch (ip.indexToKey(ty)) { + .error_set_type => return ty, + .inferred_error_set_type => return sema.resolveInferredErrorSet(block, src, ty), + else => unreachable, } } @@ -33937,6 +34044,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void .owner_decl_index = 
decl_index, .func_index = .none, .fn_ret_ty = Type.void, + .fn_ret_ty_ies = null, .owner_func_index = .none, .comptime_mutable_decls = &comptime_mutable_decls, }; @@ -34282,6 +34390,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { .owner_decl_index = decl_index, .func_index = .none, .fn_ret_ty = Type.void, + .fn_ret_ty_ies = null, .owner_func_index = .none, .comptime_mutable_decls = &comptime_mutable_decls, }; @@ -34893,6 +35002,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .var_args_param_type, .none, => unreachable, + _ => switch (mod.intern_pool.items.items(.tag)[@intFromEnum(ty.toIntern())]) { .type_int_signed, // i0 handled above .type_int_unsigned, // u0 handled above @@ -34901,6 +35011,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .type_optional, // ?noreturn handled above .type_anyframe, .type_error_union, + .type_anyerror_union, .type_error_set, .type_inferred_error_set, .type_opaque, @@ -36354,7 +36465,7 @@ fn errorSetMerge(sema: *Sema, lhs: Type, rhs: Type) !Type { const arena = sema.arena; const lhs_names = lhs.errorSetNames(mod); const rhs_names = rhs.errorSetNames(mod); - var names: Module.InferredErrorSet.NameMap = .{}; + var names: InferredErrorSet.NameMap = .{}; try names.ensureUnusedCapacity(arena, lhs_names.len); for (lhs_names) |name| { diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index d26af7be7f..4960414499 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -6061,8 +6061,6 @@ pub const FuncGen = struct { .is_var_args = false, .is_generic = false, .is_noinline = false, - .align_is_generic = false, - .cc_is_generic = false, .section_is_generic = false, .addrspace_is_generic = false, }); @@ -10657,30 +10655,31 @@ fn llvmField(ty: Type, field_index: usize, mod: *Module) ?LlvmField { } fn firstParamSRet(fn_info: InternPool.Key.FuncType, mod: *Module) bool { - if 
(!fn_info.return_type.toType().hasRuntimeBitsIgnoreComptime(mod)) return false; + const return_type = fn_info.return_type.toType(); + if (!return_type.hasRuntimeBitsIgnoreComptime(mod)) return false; const target = mod.getTarget(); switch (fn_info.cc) { - .Unspecified, .Inline => return isByRef(fn_info.return_type.toType(), mod), + .Unspecified, .Inline => return isByRef(return_type, mod), .C => switch (target.cpu.arch) { .mips, .mipsel => return false, .x86_64 => switch (target.os.tag) { - .windows => return x86_64_abi.classifyWindows(fn_info.return_type.toType(), mod) == .memory, - else => return firstParamSRetSystemV(fn_info.return_type.toType(), mod), + .windows => return x86_64_abi.classifyWindows(return_type, mod) == .memory, + else => return firstParamSRetSystemV(return_type, mod), }, - .wasm32 => return wasm_c_abi.classifyType(fn_info.return_type.toType(), mod)[0] == .indirect, - .aarch64, .aarch64_be => return aarch64_c_abi.classifyType(fn_info.return_type.toType(), mod) == .memory, - .arm, .armeb => switch (arm_c_abi.classifyType(fn_info.return_type.toType(), mod, .ret)) { + .wasm32 => return wasm_c_abi.classifyType(return_type, mod)[0] == .indirect, + .aarch64, .aarch64_be => return aarch64_c_abi.classifyType(return_type, mod) == .memory, + .arm, .armeb => switch (arm_c_abi.classifyType(return_type, mod, .ret)) { .memory, .i64_array => return true, .i32_array => |size| return size != 1, .byval => return false, }, - .riscv32, .riscv64 => return riscv_c_abi.classifyType(fn_info.return_type.toType(), mod) == .memory, + .riscv32, .riscv64 => return riscv_c_abi.classifyType(return_type, mod) == .memory, else => return false, // TODO investigate C ABI for other architectures }, - .SysV => return firstParamSRetSystemV(fn_info.return_type.toType(), mod), - .Win64 => return x86_64_abi.classifyWindows(fn_info.return_type.toType(), mod) == .memory, - .Stdcall => return !isScalar(mod, fn_info.return_type.toType()), + .SysV => return 
firstParamSRetSystemV(return_type, mod), + .Win64 => return x86_64_abi.classifyWindows(return_type, mod) == .memory, + .Stdcall => return !isScalar(mod, return_type), else => return false, } } diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index 499855b330..6b7744644e 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -1043,6 +1043,7 @@ pub fn commitDeclState( var dbg_line_buffer = &decl_state.dbg_line; var dbg_info_buffer = &decl_state.dbg_info; const decl = mod.declPtr(decl_index); + const ip = &mod.intern_pool; const target_endian = self.target.cpu.arch.endian(); @@ -1241,20 +1242,9 @@ pub fn commitDeclState( while (sym_index < decl_state.abbrev_table.items.len) : (sym_index += 1) { const symbol = &decl_state.abbrev_table.items[sym_index]; const ty = symbol.type; - const deferred: bool = blk: { - if (ty.isAnyError(mod)) break :blk true; - switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .inferred_error_set_type => |ies_index| { - const ies = mod.inferredErrorSetPtr(ies_index); - if (!ies.is_resolved) break :blk true; - }, - else => {}, - } - break :blk false; - }; - if (deferred) continue; + if (ip.isErrorSetType(ty.toIntern())) continue; - symbol.offset = @as(u32, @intCast(dbg_info_buffer.items.len)); + symbol.offset = @intCast(dbg_info_buffer.items.len); try decl_state.addDbgInfoType(mod, di_atom_index, ty); } } @@ -1265,18 +1255,7 @@ pub fn commitDeclState( if (reloc.target) |target| { const symbol = decl_state.abbrev_table.items[target]; const ty = symbol.type; - const deferred: bool = blk: { - if (ty.isAnyError(mod)) break :blk true; - switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .inferred_error_set_type => |ies_index| { - const ies = mod.inferredErrorSetPtr(ies_index); - if (!ies.is_resolved) break :blk true; - }, - else => {}, - } - break :blk false; - }; - if (deferred) { + if (ip.isErrorSetType(ty.toIntern())) { log.debug("resolving %{d} deferred until flush", .{target}); try self.global_abbrev_relocs.append(gpa, .{ .target = 
null, @@ -2505,18 +2484,18 @@ pub fn flushModule(self: *Dwarf, module: *Module) !void { defer arena_alloc.deinit(); const arena = arena_alloc.allocator(); - // TODO: don't create a zig type for this, just make the dwarf info - // without touching the zig type system. - const names = try arena.dupe(InternPool.NullTerminatedString, module.global_error_set.keys()); - std.mem.sort(InternPool.NullTerminatedString, names, {}, InternPool.NullTerminatedString.indexLessThan); - - const error_ty = try module.intern(.{ .error_set_type = .{ .names = names } }); var dbg_info_buffer = std.ArrayList(u8).init(arena); - try addDbgInfoErrorSet(module, error_ty.toType(), self.target, &dbg_info_buffer); + try addDbgInfoErrorSetNames( + module, + Type.anyerror, + module.global_error_set.keys(), + self.target, + &dbg_info_buffer, + ); const di_atom_index = try self.createAtom(.di_atom); log.debug("updateDeclDebugInfoAllocation in flushModule", .{}); - try self.updateDeclDebugInfoAllocation(di_atom_index, @as(u32, @intCast(dbg_info_buffer.items.len))); + try self.updateDeclDebugInfoAllocation(di_atom_index, @intCast(dbg_info_buffer.items.len)); log.debug("writeDeclDebugInfo in flushModule", .{}); try self.writeDeclDebugInfo(di_atom_index, dbg_info_buffer.items); @@ -2633,6 +2612,17 @@ fn addDbgInfoErrorSet( ty: Type, target: std.Target, dbg_info_buffer: *std.ArrayList(u8), +) !void { + return addDbgInfoErrorSetNames(mod, ty, ty.errorSetNames(mod), target, dbg_info_buffer); +} + +fn addDbgInfoErrorSetNames( + mod: *Module, + /// Used for printing the type name only. 
+ ty: Type, + error_names: []const InternPool.NullTerminatedString, + target: std.Target, + dbg_info_buffer: *std.ArrayList(u8), ) !void { const target_endian = target.cpu.arch.endian(); @@ -2655,7 +2645,6 @@ fn addDbgInfoErrorSet( // DW.AT.const_value, DW.FORM.data8 mem.writeInt(u64, dbg_info_buffer.addManyAsArrayAssumeCapacity(8), 0, target_endian); - const error_names = ty.errorSetNames(mod); for (error_names) |error_name_ip| { const int = try mod.getErrorValue(error_name_ip); const error_name = mod.intern_pool.stringToSlice(error_name_ip); diff --git a/src/type.zig b/src/type.zig index 2381deee94..d9d4270ed0 100644 --- a/src/type.zig +++ b/src/type.zig @@ -251,20 +251,19 @@ pub const Type = struct { return; }, .inferred_error_set_type => |index| { - const ies = mod.inferredErrorSetPtr(index); - const func = ies.func; - + const func = mod.iesFuncIndex(index); try writer.writeAll("@typeInfo(@typeInfo(@TypeOf("); const owner_decl = mod.funcOwnerDeclPtr(func); try owner_decl.renderFullyQualifiedName(mod, writer); try writer.writeAll(")).Fn.return_type.?).ErrorUnion.error_set"); }, .error_set_type => |error_set_type| { + const ip = &mod.intern_pool; const names = error_set_type.names; try writer.writeAll("error{"); - for (names, 0..) |name, i| { + for (names.get(ip), 0..) |name, i| { if (i != 0) try writer.writeByte(','); - try writer.print("{}", .{name.fmt(&mod.intern_pool)}); + try writer.print("{}", .{name.fmt(ip)}); } try writer.writeAll("}"); }, @@ -2051,21 +2050,19 @@ pub const Type = struct { /// Asserts that the type is an error union. pub fn errorUnionSet(ty: Type, mod: *Module) Type { - return mod.intern_pool.indexToKey(ty.toIntern()).error_union_type.error_set_type.toType(); + return mod.intern_pool.errorUnionSet(ty.toIntern()).toType(); } /// Returns false for unresolved inferred error sets. 
pub fn errorSetIsEmpty(ty: Type, mod: *Module) bool { + const ip = &mod.intern_pool; return switch (ty.toIntern()) { .anyerror_type => false, - else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { + else => switch (ip.indexToKey(ty.toIntern())) { .error_set_type => |error_set_type| error_set_type.names.len == 0, - .inferred_error_set_type => |index| { - const inferred_error_set = mod.inferredErrorSetPtr(index); - // Can't know for sure. - if (!inferred_error_set.is_resolved) return false; - if (inferred_error_set.is_anyerror) return false; - return inferred_error_set.errors.count() == 0; + .inferred_error_set_type => |i| switch (ip.funcIesResolved(i).*) { + .none, .anyerror_type => false, + else => |t| ip.indexToKey(t).error_set_type.names.len == 0, }, else => unreachable, }, @@ -2076,10 +2073,11 @@ pub const Type = struct { /// Note that the result may be a false negative if the type did not get error set /// resolution prior to this call. pub fn isAnyError(ty: Type, mod: *Module) bool { + const ip = &mod.intern_pool; return switch (ty.toIntern()) { .anyerror_type => true, else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .inferred_error_set_type => |i| mod.inferredErrorSetPtr(i).is_anyerror, + .inferred_error_set_type => |i| ip.funcIesResolved(i).* == .anyerror_type, else => false, }, }; @@ -2103,13 +2101,11 @@ pub const Type = struct { return switch (ty) { .anyerror_type => true, else => switch (ip.indexToKey(ty)) { - .error_set_type => |error_set_type| { - return error_set_type.nameIndex(ip, name) != null; - }, - .inferred_error_set_type => |index| { - const ies = ip.inferredErrorSetPtrConst(index); - if (ies.is_anyerror) return true; - return ies.errors.contains(name); + .error_set_type => |error_set_type| error_set_type.nameIndex(ip, name) != null, + .inferred_error_set_type => |i| switch (ip.funcIesResolved(i).*) { + .anyerror_type => true, + .none => false, + else => |t| ip.indexToKey(t).error_set_type.nameIndex(ip, name) != null, }, 
else => unreachable, }, @@ -2129,12 +2125,14 @@ pub const Type = struct { const field_name_interned = ip.getString(name).unwrap() orelse return false; return error_set_type.nameIndex(ip, field_name_interned) != null; }, - .inferred_error_set_type => |index| { - const ies = ip.inferredErrorSetPtr(index); - if (ies.is_anyerror) return true; - // If the string is not interned, then the field certainly is not present. - const field_name_interned = ip.getString(name).unwrap() orelse return false; - return ies.errors.contains(field_name_interned); + .inferred_error_set_type => |i| switch (ip.funcIesResolved(i).*) { + .anyerror_type => true, + .none => false, + else => |t| { + // If the string is not interned, then the field certainly is not present. + const field_name_interned = ip.getString(name).unwrap() orelse return false; + return ip.indexToKey(t).error_set_type.nameIndex(ip, field_name_interned) != null; + }, }, else => unreachable, }, @@ -2943,14 +2941,15 @@ pub const Type = struct { } // Asserts that `ty` is an error set and not `anyerror`. + // Asserts that `ty` is resolved if it is an inferred error set. 
pub fn errorSetNames(ty: Type, mod: *Module) []const InternPool.NullTerminatedString { - return switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .error_set_type => |x| x.names, - .inferred_error_set_type => |index| { - const inferred_error_set = mod.inferredErrorSetPtr(index); - assert(inferred_error_set.is_resolved); - assert(!inferred_error_set.is_anyerror); - return inferred_error_set.errors.keys(); + const ip = &mod.intern_pool; + return switch (ip.indexToKey(ty.toIntern())) { + .error_set_type => |x| x.names.get(ip), + .inferred_error_set_type => |i| switch (ip.funcIesResolved(i).*) { + .none => unreachable, // unresolved inferred error set + .anyerror_type => unreachable, + else => |t| ip.indexToKey(t).error_set_type.names.get(ip), }, else => unreachable, }; -- cgit v1.2.3 From c193872c814ab2c9fefcc884782301157ba8c29e Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 9 Jul 2023 15:31:47 -0700 Subject: InternPool: implement indexToKey for func_instance and func_decl Also delete incorrect frees of arena-allocated parameters. 
--- src/InternPool.zig | 75 ++++++++++++++++++++++++++++++++++++++++++++---------- src/Module.zig | 5 +--- src/Sema.zig | 10 ++------ 3 files changed, 64 insertions(+), 26 deletions(-) (limited to 'src/Module.zig') diff --git a/src/InternPool.zig b/src/InternPool.zig index bbfcff1568..ff66c97ab7 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -3178,8 +3178,8 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { } }; }, .extern_func => .{ .extern_func = ip.extraData(Tag.ExternFunc, data) }, - .func_instance => .{ .func = ip.indexToKeyFuncInstance(data) }, - .func_decl => .{ .func = ip.extraIndexToFuncDecl(data) }, + .func_instance => .{ .func = ip.extraFuncInstance(data) }, + .func_decl => .{ .func = ip.extraFuncDecl(data) }, .only_possible_value => { const ty = @as(Index, @enumFromInt(data)); const ty_item = ip.items.get(@intFromEnum(ty)); @@ -3328,16 +3328,48 @@ fn extraFuncType(ip: *const InternPool, extra_index: u32) Key.FuncType { }; } -fn extraIndexToFuncDecl(ip: *const InternPool, extra_index: u32) Key.Func { - _ = ip; - _ = extra_index; - @panic("TODO"); +fn extraFuncDecl(ip: *const InternPool, extra_index: u32) Key.Func { + const P = Tag.FuncDecl; + const func_decl = ip.extraDataTrail(P, extra_index); + return .{ + .ty = func_decl.data.ty, + .analysis_extra_index = extra_index + std.meta.fieldIndex(P, "analysis").?, + .zir_body_inst_extra_index = extra_index + std.meta.fieldIndex(P, "zir_body_inst").?, + .resolved_error_set_extra_index = if (func_decl.data.analysis.inferred_error_set) func_decl.end else 0, + .branch_quota_extra_index = 0, + .owner_decl = func_decl.data.owner_decl, + .zir_body_inst = func_decl.data.zir_body_inst, + .lbrace_line = func_decl.data.lbrace_line, + .rbrace_line = func_decl.data.rbrace_line, + .lbrace_column = func_decl.data.lbrace_column, + .rbrace_column = func_decl.data.lbrace_column, + .generic_owner = .none, + .comptime_args = .{ .start = 0, .len = 0 }, + }; } -fn indexToKeyFuncInstance(ip: *const 
InternPool, data: u32) Key.Func { - _ = ip; - _ = data; - @panic("TODO"); +fn extraFuncInstance(ip: *const InternPool, extra_index: u32) Key.Func { + const P = Tag.FuncInstance; + const fi = ip.extraDataTrail(P, extra_index); + const func_decl = ip.funcDeclInfo(fi.data.generic_owner); + return .{ + .ty = fi.data.ty, + .analysis_extra_index = extra_index + std.meta.fieldIndex(P, "analysis").?, + .zir_body_inst_extra_index = func_decl.zir_body_inst_extra_index, + .resolved_error_set_extra_index = if (fi.data.analysis.inferred_error_set) fi.end else 0, + .branch_quota_extra_index = extra_index + std.meta.fieldIndex(P, "branch_quota").?, + .owner_decl = fi.data.owner_decl, + .zir_body_inst = func_decl.zir_body_inst, + .lbrace_line = func_decl.lbrace_line, + .rbrace_line = func_decl.rbrace_line, + .lbrace_column = func_decl.lbrace_column, + .rbrace_column = func_decl.lbrace_column, + .generic_owner = fi.data.generic_owner, + .comptime_args = .{ + .start = fi.end + @intFromBool(fi.data.analysis.inferred_error_set), + .len = ip.funcTypeParamsLen(func_decl.ty), + }, + }; } fn indexToKeyEnum(ip: *const InternPool, data: u32, tag_mode: Key.EnumType.TagMode) Key { @@ -4421,7 +4453,7 @@ pub fn getFuncDecl(ip: *InternPool, gpa: Allocator, key: GetFuncDeclKey) Allocat const adapter: KeyAdapter = .{ .intern_pool = ip }; const gop = ip.map.getOrPutAssumeCapacityAdapted(Key{ - .func = extraIndexToFuncDecl(ip, func_decl_extra_index), + .func = extraFuncDecl(ip, func_decl_extra_index), }, adapter); if (gop.found_existing) { @@ -4544,7 +4576,7 @@ pub fn getFuncDeclIes(ip: *InternPool, gpa: Allocator, key: GetFuncDeclIesKey) A const adapter: KeyAdapter = .{ .intern_pool = ip }; const gop = ip.map.getOrPutAssumeCapacityAdapted(Key{ - .func = extraIndexToFuncDecl(ip, func_decl_extra_index), + .func = extraFuncDecl(ip, func_decl_extra_index), }, adapter); if (!gop.found_existing) { assert(!ip.map.getOrPutAssumeCapacityAdapted(Key{ .error_union_type = .{ @@ -5052,7 +5084,7 @@ fn 
addLimbsAssumeCapacity(ip: *InternPool, limbs: []const Limb) void { } } -fn extraDataTrail(ip: *const InternPool, comptime T: type, index: usize) struct { data: T, end: usize } { +fn extraDataTrail(ip: *const InternPool, comptime T: type, index: usize) struct { data: T, end: u32 } { var result: T = undefined; const fields = @typeInfo(T).Struct.fields; inline for (fields, 0..) |field, i| { @@ -5085,7 +5117,7 @@ fn extraDataTrail(ip: *const InternPool, comptime T: type, index: usize) struct } return .{ .data = result, - .end = index + fields.len, + .end = @intCast(index + fields.len), }; } @@ -6713,3 +6745,18 @@ pub fn funcIesResolved(ip: *const InternPool, func_index: InternPool.Index) *Int }; return @ptrCast(&ip.extra.items[extra_index]); } + +pub fn funcDeclInfo(ip: *const InternPool, i: InternPool.Index) Key.Func { + const tags = ip.items.items(.tag); + const datas = ip.items.items(.data); + assert(tags[@intFromEnum(i)] == .func_decl); + return extraFuncDecl(ip, datas[@intFromEnum(i)]); +} + +pub fn funcTypeParamsLen(ip: *const InternPool, i: InternPool.Index) u32 { + const tags = ip.items.items(.tag); + const datas = ip.items.items(.data); + assert(tags[@intFromEnum(i)] == .type_function); + const start = datas[@intFromEnum(i)]; + return ip.extra.items[start + std.meta.fieldIndex(Tag.TypeFunction, "params_len").?]; +} diff --git a/src/Module.zig b/src/Module.zig index e750364b44..83449f093b 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -4279,10 +4279,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { .inlining = null, .is_comptime = true, }; - defer { - block_scope.instructions.deinit(gpa); - block_scope.params.deinit(gpa); - } + defer block_scope.instructions.deinit(gpa); const zir_block_index = decl.zirBlockIndex(mod); const inst_data = zir_datas[zir_block_index].pl_node; diff --git a/src/Sema.zig b/src/Sema.zig index 9cd6acfc60..d8c55e5aeb 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -34063,10 +34063,7 @@ fn semaStructFields(mod: *Module, 
struct_obj: *Module.Struct) CompileError!void .inlining = null, .is_comptime = true, }; - defer { - assert(block_scope.instructions.items.len == 0); - block_scope.params.deinit(gpa); - } + defer assert(block_scope.instructions.items.len == 0); struct_obj.fields = .{}; try struct_obj.fields.ensureTotalCapacity(mod.tmp_hack_arena.allocator(), fields_len); @@ -34409,10 +34406,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { .inlining = null, .is_comptime = true, }; - defer { - assert(block_scope.instructions.items.len == 0); - block_scope.params.deinit(gpa); - } + defer assert(block_scope.instructions.items.len == 0); if (body.len != 0) { try sema.analyzeBody(&block_scope, body); -- cgit v1.2.3 From d15e8f8017758fb77dd6e839ef3f39b174522c5c Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 16 Jul 2023 23:17:19 -0700 Subject: Sema: resolve inferred error set with function state in_progress This way dependency loops are reported instead of the compiler crashing. --- src/Module.zig | 23 ++++++++++++++++++++++- src/Sema.zig | 24 +++++++++++++++--------- 2 files changed, 37 insertions(+), 10 deletions(-) (limited to 'src/Module.zig') diff --git a/src/Module.zig b/src/Module.zig index 83449f093b..45536547d8 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -5348,6 +5348,27 @@ pub fn analyzeFnBody(mod: *Module, func_index: InternPool.Index, arena: Allocato sema.air_extra.appendSliceAssumeCapacity(inner_block.instructions.items); sema.air_extra.items[@intFromEnum(Air.ExtraIndex.main_block)] = main_block_index; + // Resolving inferred error sets is done *before* setting the function + // state to success, so that "unable to resolve inferred error set" errors + // can be emitted here. 
+ if (sema.fn_ret_ty_ies) |ies| { + sema.resolveInferredErrorSetPtr(&inner_block, LazySrcLoc.nodeOffset(0), ies) catch |err| switch (err) { + error.NeededSourceLocation => unreachable, + error.GenericPoison => unreachable, + error.ComptimeReturn => unreachable, + error.ComptimeBreak => unreachable, + error.AnalysisFail => { + // In this case our function depends on a type that had a compile error. + // We should not try to lower this function. + decl.analysis = .dependency_failure; + return error.AnalysisFail; + }, + else => |e| return e, + }; + assert(ies.resolved != .none); + ip.funcIesResolved(func_index).* = ies.resolved; + } + func.analysis(ip).state = .success; // Finally we must resolve the return type and parameter types so that backends @@ -5355,7 +5376,7 @@ pub fn analyzeFnBody(mod: *Module, func_index: InternPool.Index, arena: Allocato // Crucially, this happens *after* we set the function state to success above, // so that dependencies on the function body will now be satisfied rather than // result in circular dependency errors. - sema.resolveFnTypes(&inner_block, LazySrcLoc.nodeOffset(0), fn_ty) catch |err| switch (err) { + sema.resolveFnTypes(fn_ty) catch |err| switch (err) { error.NeededSourceLocation => unreachable, error.GenericPoison => unreachable, error.ComptimeReturn => unreachable, diff --git a/src/Sema.zig b/src/Sema.zig index e02e99faf2..3f59548b47 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -30619,12 +30619,13 @@ fn analyzeIsNonErrComptimeOnly( ies.func == func_index) { // Try to avoid resolving inferred error set if possible. 
- if (ies.errors.count() != 0) break :blk; + if (ies.errors.count() != 0) return .none; switch (ies.resolved) { - .anyerror_type => break :blk, + .anyerror_type => return .none, .none => {}, - else => if (ip.indexToKey(ies.resolved).error_set_type.names.len != 0) { - break :blk; + else => switch (ip.indexToKey(ies.resolved).error_set_type.names.len) { + 0 => return .bool_true, + else => return .none, }, } for (ies.inferred_error_sets.keys()) |other_ies_index| { @@ -30633,10 +30634,10 @@ fn analyzeIsNonErrComptimeOnly( try sema.resolveInferredErrorSet(block, src, other_ies_index); if (other_resolved == .anyerror_type) { ies.resolved = .anyerror_type; - break :blk; + return .none; } if (ip.indexToKey(other_resolved).error_set_type.names.len != 0) - break :blk; + return .none; } return .bool_true; } @@ -33113,16 +33114,21 @@ fn typeIsArrayLike(sema: *Sema, ty: Type) ?ArrayLike { }; } -pub fn resolveFnTypes(sema: *Sema, block: *Block, src: LazySrcLoc, fn_ty: Type) CompileError!void { +pub fn resolveIes(sema: *Sema, block: *Block, src: LazySrcLoc) CompileError!void { const mod = sema.mod; const ip = &mod.intern_pool; - const fn_ty_info = mod.typeToFunc(fn_ty).?; if (sema.fn_ret_ty_ies) |ies| { try sema.resolveInferredErrorSetPtr(block, src, ies); assert(ies.resolved != .none); ip.funcIesResolved(sema.func_index).* = ies.resolved; } +} + +pub fn resolveFnTypes(sema: *Sema, fn_ty: Type) CompileError!void { + const mod = sema.mod; + const ip = &mod.intern_pool; + const fn_ty_info = mod.typeToFunc(fn_ty).?; try sema.resolveTypeFully(fn_ty_info.return_type.toType()); @@ -34111,7 +34117,7 @@ fn resolveInferredErrorSet( return final_resolved_ty; } -fn resolveInferredErrorSetPtr( +pub fn resolveInferredErrorSetPtr( sema: *Sema, block: *Block, src: LazySrcLoc, -- cgit v1.2.3 From 3f2a4720b1ef057215c8240b3a377c523ee63e94 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 17 Jul 2023 17:47:59 -0700 Subject: compiler: fix branch regressions * getOwnedFunctionIndex no longer 
checks if the value is actually a function. * The callsites to `intern` that I added want to avoid the `getCoerced` call, so I added `intern2`. * Adding to inferred error sets should not happen if the destination error set is not the inferred error set of the current Sema instance. * adhoc_inferred_error_set_type can be seen by the backend. Treat it like anyerror. --- src/Module.zig | 3 ++- src/Sema.zig | 6 +++--- src/link/Coff.zig | 2 +- src/type.zig | 2 +- src/value.zig | 5 +++++ 5 files changed, 12 insertions(+), 6 deletions(-) (limited to 'src/Module.zig') diff --git a/src/Module.zig b/src/Module.zig index 45536547d8..d9ee3a57ad 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -751,6 +751,7 @@ pub const Decl = struct { }; } + /// This returns an InternPool.Index even when the value is not a function. pub fn getOwnedFunctionIndex(decl: Decl) InternPool.Index { return if (decl.owns_tv) decl.val.toIntern() else .none; } @@ -4978,7 +4979,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err decl.has_align = has_align; decl.has_linksection_or_addrspace = has_linksection_or_addrspace; decl.zir_decl_index = @as(u32, @intCast(decl_sub_index)); - if (decl.getOwnedFunctionIndex() != .none) { + if (decl.getOwnedFunction(mod) != null) { switch (comp.bin_file.tag) { .coff, .elf, .macho, .plan9 => { // TODO Look into detecting when this would be unnecessary by storing enough state diff --git a/src/Sema.zig b/src/Sema.zig index 3f59548b47..5cee5718f8 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -7140,7 +7140,7 @@ fn analyzeCall( if (should_memoize and is_comptime_call) { const result_val = try sema.resolveConstMaybeUndefVal(block, .unneeded, result, ""); - const result_interned = try result_val.intern(sema.fn_ret_ty, mod); + const result_interned = try result_val.intern2(sema.fn_ret_ty, mod); // Transform ad-hoc inferred error set types into concrete error sets. 
const result_transformed = try sema.resolveAdHocInferredErrorSet(block, call_src, result_interned); @@ -7157,7 +7157,7 @@ fn analyzeCall( } if (try sema.resolveMaybeUndefVal(result)) |result_val| { - const result_interned = try result_val.intern(sema.fn_ret_ty, mod); + const result_interned = try result_val.intern2(sema.fn_ret_ty, mod); const result_transformed = try sema.resolveAdHocInferredErrorSet(block, call_src, result_interned); break :res2 Air.internedToRef(result_transformed); } @@ -18319,7 +18319,7 @@ fn analyzeRet( // add the error tag to the inferred error set of the in-scope function, so // that the coercion below works correctly. const mod = sema.mod; - if (sema.fn_ret_ty.zigTypeTag(mod) == .ErrorUnion) { + if (sema.fn_ret_ty_ies != null and sema.fn_ret_ty.zigTypeTag(mod) == .ErrorUnion) { try sema.addToInferredErrorSet(uncasted_operand); } const operand = sema.coerceExtra(block, sema.fn_ret_ty, uncasted_operand, src, .{ .is_ret = true }) catch |err| switch (err) { diff --git a/src/link/Coff.zig b/src/link/Coff.zig index 202f4a13f9..8720fc1037 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -1424,7 +1424,7 @@ pub fn updateDeclExports( // detect the default subsystem. 
for (exports) |exp| { const exported_decl = mod.declPtr(exp.exported_decl); - if (exported_decl.getOwnedFunctionIndex() == .none) continue; + if (exported_decl.getOwnedFunction(mod) == null) continue; const winapi_cc = switch (self.base.options.target.cpu.arch) { .x86 => std.builtin.CallingConvention.Stdcall, else => std.builtin.CallingConvention.C, diff --git a/src/type.zig b/src/type.zig index ceb795a870..c70544d189 100644 --- a/src/type.zig +++ b/src/type.zig @@ -2238,7 +2238,7 @@ pub const Type = struct { var ty = starting_ty; while (true) switch (ty.toIntern()) { - .anyerror_type => { + .anyerror_type, .adhoc_inferred_error_set_type => { // TODO revisit this when error sets support custom int types return .{ .signedness = .unsigned, .bits = 16 }; }, diff --git a/src/value.zig b/src/value.zig index f1371e9689..ec0f359671 100644 --- a/src/value.zig +++ b/src/value.zig @@ -262,6 +262,11 @@ pub const Value = struct { return ip.getOrPutTrailingString(gpa, len); } + pub fn intern2(val: Value, ty: Type, mod: *Module) Allocator.Error!InternPool.Index { + if (val.ip_index != .none) return val.ip_index; + return intern(val, ty, mod); + } + pub fn intern(val: Value, ty: Type, mod: *Module) Allocator.Error!InternPool.Index { if (val.ip_index != .none) return (try mod.getCoerced(val, ty)).toIntern(); switch (val.tag()) { -- cgit v1.2.3 From 0153f3a8f9b93ebef7b5cd70db8560fcac658ce7 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 18 Jul 2023 17:12:19 -0700 Subject: Sema: fix crash: array_in_c_exported_function Fuck it, we're storing decl indexes in LazySrcLoc now. 
--- src/Module.zig | 16 ++++++--- src/Sema.zig | 39 +++++++++------------- .../array_in_c_exported_function.zig | 4 +-- 3 files changed, 28 insertions(+), 31 deletions(-) (limited to 'src/Module.zig') diff --git a/src/Module.zig b/src/Module.zig index d9ee3a57ad..3ae78c30b1 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -1821,8 +1821,8 @@ pub const SrcLoc = struct { return tree.firstToken(src_loc.parent_decl_node); } - pub fn declRelativeToNodeIndex(src_loc: SrcLoc, offset: i32) Ast.TokenIndex { - return @as(Ast.Node.Index, @bitCast(offset + @as(i32, @bitCast(src_loc.parent_decl_node)))); + pub fn declRelativeToNodeIndex(src_loc: SrcLoc, offset: i32) Ast.Node.Index { + return @bitCast(offset + @as(i32, @bitCast(src_loc.parent_decl_node))); } pub const Span = struct { @@ -2829,14 +2829,15 @@ pub const LazySrcLoc = union(enum) { /// The Decl is determined contextually. for_capture_from_input: i32, /// The source location points to the argument node of a function call. - /// The Decl is determined contextually. call_arg: struct { + decl: Decl.Index, /// Points to the function call AST node. call_node_offset: i32, /// The index of the argument the source location points to. arg_index: u32, }, fn_proto_param: struct { + decl: Decl.Index, /// Points to the function prototype AST node. fn_proto_node_offset: i32, /// The index of the parameter the source location points to. 
@@ -2931,13 +2932,18 @@ pub const LazySrcLoc = union(enum) { .node_offset_store_operand, .for_input, .for_capture_from_input, - .call_arg, - .fn_proto_param, => .{ .file_scope = decl.getFileScope(mod), .parent_decl_node = decl.src_node, .lazy = lazy, }, + inline .call_arg, + .fn_proto_param, + => |x| .{ + .file_scope = decl.getFileScope(mod), + .parent_decl_node = mod.declPtr(x.decl).src_node, + .lazy = lazy, + }, }; } }; diff --git a/src/Sema.zig b/src/Sema.zig index 88dd3c83da..87ca0b7c60 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -6997,10 +6997,14 @@ fn analyzeCall( var has_comptime_args = false; var arg_i: u32 = 0; for (fn_info.param_body) |inst| { - const arg_src: LazySrcLoc = .{ .call_arg = .{ - .call_node_offset = call_src.node_offset.x, - .arg_index = arg_i, - } }; + const arg_src: LazySrcLoc = if (arg_i == 0 and bound_arg_src != null) + bound_arg_src.? + else + .{ .call_arg = .{ + .decl = block.src_decl, + .call_node_offset = call_src.node_offset.x, + .arg_index = arg_i - @intFromBool(bound_arg_src != null), + } }; try sema.analyzeInlineCallArg( block, &child_block, @@ -7356,7 +7360,7 @@ fn analyzeInlineCallArg( } const casted_arg = sema.coerceExtra(arg_block, param_ty.toType(), uncasted_arg, arg_src, .{ .param_src = .{ .func_inst = func_inst, - .param_i = @as(u32, @intCast(arg_i.*)), + .param_i = @intCast(arg_i.*), } }) catch |err| switch (err) { error.NotCoercible => unreachable, else => |e| return e, @@ -7586,6 +7590,7 @@ fn instantiateGenericCall( const arg_src: LazySrcLoc = if (total_i == 0 and bound_arg_src != null) bound_arg_src.? 
else if (call_src == .node_offset) .{ .call_arg = .{ + .decl = block.src_decl, .call_node_offset = call_src.node_offset.x, .arg_index = @intCast(total_i), } } else .unneeded; @@ -8729,6 +8734,7 @@ fn funcCommon( break :blk @as(u1, @truncate(noalias_bits >> index)) != 0; }; const param_src: LazySrcLoc = .{ .fn_proto_param = .{ + .decl = block.src_decl, .fn_proto_node_offset = src_node_offset, .param_index = @intCast(i), } }; @@ -9316,9 +9322,10 @@ fn zirParamAnytype( return; } const arg_src: LazySrcLoc = if (sema.generic_call_src == .node_offset) .{ .call_arg = .{ + .decl = sema.generic_call_decl.unwrap().?, .call_node_offset = sema.generic_call_src.node_offset.x, .arg_index = param_index, - } } else .unneeded; + } } else src; if (comptime_syntax) { if (try sema.resolveMaybeUndefVal(air_ref)) |val| { @@ -9326,15 +9333,7 @@ fn zirParamAnytype( return; } const msg = msg: { - const fallback_src = src.toSrcLoc(mod.declPtr(block.src_decl), mod); - const src_loc = if (sema.generic_call_decl.unwrap()) |decl| - if (arg_src != .unneeded) - arg_src.toSrcLoc(mod.declPtr(decl), mod) - else - fallback_src - else - fallback_src; - + const src_loc = arg_src.toSrcLoc(mod.declPtr(block.src_decl), mod); const msg = try Module.ErrorMsg.create(gpa, src_loc, "{s}", .{ @as([]const u8, "runtime-known argument passed to comptime parameter"), }); @@ -9354,15 +9353,7 @@ fn zirParamAnytype( return; } const msg = msg: { - const fallback_src = src.toSrcLoc(mod.declPtr(block.src_decl), mod); - const src_loc = if (sema.generic_call_decl.unwrap()) |decl| - if (arg_src != .unneeded) - arg_src.toSrcLoc(mod.declPtr(decl), mod) - else - fallback_src - else - fallback_src; - + const src_loc = arg_src.toSrcLoc(mod.declPtr(block.src_decl), mod); const msg = try Module.ErrorMsg.create(gpa, src_loc, "{s}", .{ @as([]const u8, "runtime-known argument passed to comptime-only type parameter"), }); diff --git a/test/cases/compile_errors/array_in_c_exported_function.zig 
b/test/cases/compile_errors/array_in_c_exported_function.zig index e938b6afd4..0d31f69370 100644 --- a/test/cases/compile_errors/array_in_c_exported_function.zig +++ b/test/cases/compile_errors/array_in_c_exported_function.zig @@ -10,7 +10,7 @@ export fn zig_return_array() [10]u8 { // backend=stage2 // target=native // -// :1:21: error: parameter of type '[10]u8' not allowed in function with calling convention 'C' -// :1:21: note: arrays are not allowed as a parameter type +// :1:24: error: parameter of type '[10]u8' not allowed in function with calling convention 'C' +// :1:24: note: arrays are not allowed as a parameter type // :5:30: error: return type '[10]u8' not allowed in function with calling convention 'C' // :5:30: note: arrays are not allowed as a return type -- cgit v1.2.3 From 727b371bbc53b1fcabb6c6899043da3a84195b3c Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 18 Jul 2023 17:50:41 -0700 Subject: Sema: fix source location crash for function prototypes --- src/Module.zig | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) (limited to 'src/Module.zig') diff --git a/src/Module.zig b/src/Module.zig index 3ae78c30b1..0467688974 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -2146,9 +2146,17 @@ pub const SrcLoc = struct { const tree = try src_loc.file_scope.getTree(gpa); const node = src_loc.declRelativeToNodeIndex(fn_proto_param.fn_proto_node_offset); var buf: [1]Ast.Node.Index = undefined; - const fn_proto_full = tree.fullFnProto(&buf, node).?; - const src_node = fn_proto_full.ast.params[fn_proto_param.param_index]; - return nodeToSpan(tree, src_node); + const full = tree.fullFnProto(&buf, node).?; + var it = full.iterate(tree); + var i: usize = 0; + while (it.next()) |param| : (i += 1) { + if (i == fn_proto_param.param_index) { + if (param.anytype_ellipsis3) |token| return tokenToSpan(tree, token); + if (param.name_token) |token| return tokenToSpan(tree, token); + return nodeToSpan(tree, param.type_expr); + } + } + 
unreachable; }, .node_offset_bin_lhs => |node_off| { const tree = try src_loc.file_scope.getTree(gpa); @@ -2502,6 +2510,10 @@ pub const SrcLoc = struct { ); } + fn tokenToSpan(tree: *const Ast, token: Ast.TokenIndex) Span { + return tokensToSpan(tree, token, token, token); + } + fn tokensToSpan(tree: *const Ast, start: Ast.TokenIndex, end: Ast.TokenIndex, main: Ast.TokenIndex) Span { const token_starts = tree.tokens.items(.start); var start_tok = start; -- cgit v1.2.3 From 8daa8d255b84880de60dc1a86d2f1d4a89463bbc Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 18 Jul 2023 18:34:14 -0700 Subject: Sema: fix fn_proto_param LazySrcLoc resolution to match source code span from merge-base. --- src/Module.zig | 11 +++++++++-- test/cases/compile_errors/array_in_c_exported_function.zig | 4 ++-- .../export_function_with_comptime_parameter.zig | 2 +- test/cases/compile_errors/export_generic_function.zig | 2 +- .../extern_function_with_comptime_parameter.zig | 2 +- 5 files changed, 14 insertions(+), 7 deletions(-) (limited to 'src/Module.zig') diff --git a/src/Module.zig b/src/Module.zig index 0467688974..ea444d3cc4 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -2152,8 +2152,15 @@ pub const SrcLoc = struct { while (it.next()) |param| : (i += 1) { if (i == fn_proto_param.param_index) { if (param.anytype_ellipsis3) |token| return tokenToSpan(tree, token); - if (param.name_token) |token| return tokenToSpan(tree, token); - return nodeToSpan(tree, param.type_expr); + const first_token = param.comptime_noalias orelse + param.name_token orelse + tree.firstToken(param.type_expr); + return tokensToSpan( + tree, + first_token, + tree.lastToken(param.type_expr), + first_token, + ); } } unreachable; diff --git a/test/cases/compile_errors/array_in_c_exported_function.zig b/test/cases/compile_errors/array_in_c_exported_function.zig index 0d31f69370..e938b6afd4 100644 --- a/test/cases/compile_errors/array_in_c_exported_function.zig +++ 
b/test/cases/compile_errors/array_in_c_exported_function.zig @@ -10,7 +10,7 @@ export fn zig_return_array() [10]u8 { // backend=stage2 // target=native // -// :1:24: error: parameter of type '[10]u8' not allowed in function with calling convention 'C' -// :1:24: note: arrays are not allowed as a parameter type +// :1:21: error: parameter of type '[10]u8' not allowed in function with calling convention 'C' +// :1:21: note: arrays are not allowed as a parameter type // :5:30: error: return type '[10]u8' not allowed in function with calling convention 'C' // :5:30: note: arrays are not allowed as a return type diff --git a/test/cases/compile_errors/export_function_with_comptime_parameter.zig b/test/cases/compile_errors/export_function_with_comptime_parameter.zig index 8d5dbef1c3..948053534d 100644 --- a/test/cases/compile_errors/export_function_with_comptime_parameter.zig +++ b/test/cases/compile_errors/export_function_with_comptime_parameter.zig @@ -6,4 +6,4 @@ export fn foo(comptime x: anytype, y: i32) i32 { // backend=stage2 // target=native // -// :1:15: error: comptime parameters not allowed in function with calling convention 'C' +// :1:27: error: comptime parameters not allowed in function with calling convention 'C' diff --git a/test/cases/compile_errors/export_generic_function.zig b/test/cases/compile_errors/export_generic_function.zig index 4ffbad9df7..65469be6d9 100644 --- a/test/cases/compile_errors/export_generic_function.zig +++ b/test/cases/compile_errors/export_generic_function.zig @@ -7,4 +7,4 @@ export fn foo(num: anytype) i32 { // backend=stage2 // target=native // -// :1:15: error: generic parameters not allowed in function with calling convention 'C' +// :1:20: error: generic parameters not allowed in function with calling convention 'C' diff --git a/test/cases/compile_errors/extern_function_with_comptime_parameter.zig b/test/cases/compile_errors/extern_function_with_comptime_parameter.zig index fac09cc265..b8f5f0b1b8 100644 --- 
a/test/cases/compile_errors/extern_function_with_comptime_parameter.zig +++ b/test/cases/compile_errors/extern_function_with_comptime_parameter.zig @@ -19,5 +19,5 @@ comptime { // target=native // // :5:30: error: comptime parameters not allowed in function with calling convention 'C' -// :6:30: error: generic parameters not allowed in function with calling convention 'C' +// :6:41: error: generic parameters not allowed in function with calling convention 'C' // :1:15: error: comptime parameters not allowed in function with calling convention 'C' -- cgit v1.2.3